/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/hash.c
 *
 * Implement a hash class for use throughout the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - move everything to libcfs
 * - don't allow cur_bits != max_bits without setting CFS_HASH_REHASH
 * - ignore hs_rwlock if CFS_HASH_REHASH is not set
 * - buckets are allocated one by one (instead of as contiguous memory),
 *   to avoid unnecessary cacheline conflicts
 *
 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
 * - a "bucket" is now a group of hlist_heads; the user can specify the
 *   bucket size via bkt_bits of cfs_hash_create(), and all hlist_heads
 *   in a bucket share one lock to reduce memory overhead.
 *
 * - support lockless hash, where the caller takes care of locks:
 *   avoids lock overhead for hash tables that are already protected
 *   by locking in the caller for another reason
 *
 * - support both spinlock/rwlock for bucket:
 *   the overhead of spinlock contention is lower than the read/write
 *   contention of an rwlock, so using a spinlock to serialize operations
 *   on a bucket is more reasonable for frequently changed hash tables
 *
 * - support single-lock mode:
 *   one lock protects all hash operations, to avoid the overhead of
 *   multiple locks when the hash table is always small
 *
 * - removed a lot of unnecessary addref & decref on hash elements:
 *   addref & decref are atomic operations, which are expensive in
 *   many use-cases.
 *
 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
 *   some Lustre use-cases require these functions to be strictly
 *   non-blocking, so in those cases any required rehash is scheduled
 *   on a different thread.
 *
 * - safer rehash on large hash tables:
 *   in the old implementation, the rehash function exclusively locked
 *   the hash table and finished the rehash in one batch; that is
 *   dangerous on an SMP system because rehashing millions of elements
 *   can take a long time. The new rehash can release the lock and
 *   yield the CPU in the middle of a rehash, so it is safe for another
 *   thread to search/change the hash table even while it is rehashing.
 *
 * - support two different refcount modes:
 *   . the hash table holds a refcount on each element
 *   . the hash table doesn't change the refcount on add/remove
 *
 * - support long-name hash tables (for param-tree)
 *
 * - fix a bug in cfs_hash_rehash_key:
 *   in the old implementation, cfs_hash_rehash_key could corrupt the
 *   hash table because @key was overwritten without any protection.
 *   Now the user must define hs_keycpy for rehash-enabled hash tables,
 *   and cfs_hash_rehash_key overwrites the hash key under the lock by
 *   calling hs_keycpy.
 *
 * - better hash iteration:
 *   both locked and lockless iteration of the hash table are now
 *   supported. The user can also break the iteration by returning 1
 *   from the callback.
 */
#include <linux/seq_file.h>

#include <libcfs/linux/linux-list.h>
#include <libcfs/libcfs.h>

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
module_param(warn_on_depth, uint, 0644);
MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
#endif

struct cfs_wi_sched *cfs_sched_rehash;

static inline void
cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
        __acquires(&lock->spin)
{
        spin_lock(&lock->spin);
}

static inline void
cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
        __releases(&lock->spin)
{
        spin_unlock(&lock->spin);
}

static inline void
cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
        __acquires(&lock->rw)
{
        if (!exclusive)
                read_lock(&lock->rw);
        else
                write_lock(&lock->rw);
}

static inline void
cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
        __releases(&lock->rw)
{
        if (!exclusive)
                read_unlock(&lock->rw);
        else
                write_unlock(&lock->rw);
}

/** No lock hash */
static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** no bucket lock, one spinlock to protect everything */
static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
        .hs_lock        = cfs_hash_spin_lock,
        .hs_unlock      = cfs_hash_spin_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** spin bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

/** spin bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

static void
cfs_hash_lock_setup(struct cfs_hash *hs)
{
        if (cfs_hash_with_no_lock(hs)) {
                hs->hs_lops = &cfs_hash_nl_lops;

        } else if (cfs_hash_with_no_bktlock(hs)) {
                hs->hs_lops = &cfs_hash_nbl_lops;
                spin_lock_init(&hs->hs_lock.spin);

        } else if (cfs_hash_with_rehash(hs)) {
                rwlock_init(&hs->hs_lock.rw);

                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_spin_lops;
                else
                        LBUG();
        } else {
                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
                else
                        LBUG();
        }
}
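
/*
 * Summary of the flag-to-lock-ops mapping above (explanatory note,
 * derived from cfs_hash_lock_setup() itself):
 *
 *   CFS_HASH_NO_LOCK                        -> no table lock, no bucket lock
 *   CFS_HASH_NO_BKTLOCK                     -> one table spinlock only
 *   CFS_HASH_REHASH + CFS_HASH_RW_BKTLOCK   -> table rwlock + bucket rwlock
 *   CFS_HASH_REHASH + CFS_HASH_SPIN_BKTLOCK -> table rwlock + bucket spinlock
 *   no rehash + CFS_HASH_RW_BKTLOCK         -> per-bucket rwlock only
 *   no rehash + CFS_HASH_SPIN_BKTLOCK       -> per-bucket spinlock only
 */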

/**
 * Simple hash head without depth tracking
 * new element is always added to head of hlist
 */
struct cfs_hash_head {
        struct hlist_head       hh_head;        /**< entries list */
};

static int
cfs_hash_hh_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_head);
}

static struct hlist_head *
cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_head *head;

        head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hh_head;
}

static int
cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
        return -1; /* unknown depth */
}

static int
cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        hlist_del_init(hnode);
        return -1; /* unknown depth */
}

/**
 * Simple hash head with depth tracking
 * new element is always added to head of hlist
 */
struct cfs_hash_head_dep {
        struct hlist_head       hd_head;        /**< entries list */
        unsigned int            hd_depth;       /**< list length */
};

static int
cfs_hash_hd_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_head_dep);
}

static struct hlist_head *
cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_head_dep   *head;

        head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hd_head;
}

static int
cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_head_dep *hh;

        hh = container_of(cfs_hash_hd_hhead(hs, bd),
                          struct cfs_hash_head_dep, hd_head);
        hlist_add_head(hnode, &hh->hd_head);
        return ++hh->hd_depth;
}

static int
cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_head_dep *hh;

        hh = container_of(cfs_hash_hd_hhead(hs, bd),
                          struct cfs_hash_head_dep, hd_head);
        hlist_del_init(hnode);
        return --hh->hd_depth;
}

/**
 * double links hash head without depth tracking
 * new element is always added to tail of hlist
 */
struct cfs_hash_dhead {
        struct hlist_head       dh_head;        /**< entries list */
        struct hlist_node       *dh_tail;       /**< the last entry */
};

static int
cfs_hash_dh_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_dhead);
}

static struct hlist_head *
cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_dhead *head;

        head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dh_head;
}

static int
cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_dhead *dh;

        dh = container_of(cfs_hash_dh_hhead(hs, bd),
                          struct cfs_hash_dhead, dh_head);
        if (dh->dh_tail != NULL) /* not empty */
                hlist_add_behind(hnode, dh->dh_tail);
        else /* empty list */
                hlist_add_head(hnode, &dh->dh_head);
        dh->dh_tail = hnode;
        return -1; /* unknown depth */
}

static int
cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
{
        struct cfs_hash_dhead *dh;

        dh = container_of(cfs_hash_dh_hhead(hs, bd),
                          struct cfs_hash_dhead, dh_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return -1; /* unknown depth */
}
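
/*
 * Note on the tail update above (explanatory, not in the original):
 * an hlist_node's pprev points at the previous node's ->next field (or
 * at the head's ->first field for the first node). So when deleting the
 * tail, the new tail is recovered with container_of(hnd->pprev,
 * struct hlist_node, next), unless pprev points at dh_head.first, in
 * which case the list is becoming empty and dh_tail is reset to NULL.
 */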

/**
 * double links hash head with depth tracking
 * new element is always added to tail of hlist
 */
struct cfs_hash_dhead_dep {
        struct hlist_head       dd_head;        /**< entries list */
        struct hlist_node       *dd_tail;       /**< the last entry */
        unsigned int            dd_depth;       /**< list length */
};

static int
cfs_hash_dd_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_dhead_dep);
}

static struct hlist_head *
cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_dhead_dep *head;

        head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dd_head;
}

static int
cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_dhead_dep *dh;

        dh = container_of(cfs_hash_dd_hhead(hs, bd),
                          struct cfs_hash_dhead_dep, dd_head);
        if (dh->dd_tail != NULL) /* not empty */
                hlist_add_behind(hnode, dh->dd_tail);
        else /* empty list */
                hlist_add_head(hnode, &dh->dd_head);
        dh->dd_tail = hnode;
        return ++dh->dd_depth;
}

static int
cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
{
        struct cfs_hash_dhead_dep *dh;

        dh = container_of(cfs_hash_dd_hhead(hs, bd),
                          struct cfs_hash_dhead_dep, dd_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return --dh->dd_depth;
}

static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
       .hop_hhead      = cfs_hash_hh_hhead,
       .hop_hhead_size = cfs_hash_hh_hhead_size,
       .hop_hnode_add  = cfs_hash_hh_hnode_add,
       .hop_hnode_del  = cfs_hash_hh_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
       .hop_hhead      = cfs_hash_hd_hhead,
       .hop_hhead_size = cfs_hash_hd_hhead_size,
       .hop_hnode_add  = cfs_hash_hd_hnode_add,
       .hop_hnode_del  = cfs_hash_hd_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
       .hop_hhead      = cfs_hash_dh_hhead,
       .hop_hhead_size = cfs_hash_dh_hhead_size,
       .hop_hnode_add  = cfs_hash_dh_hnode_add,
       .hop_hnode_del  = cfs_hash_dh_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
       .hop_hhead      = cfs_hash_dd_hhead,
       .hop_hhead_size = cfs_hash_dd_hhead_size,
       .hop_hnode_add  = cfs_hash_dd_hnode_add,
       .hop_hnode_del  = cfs_hash_dd_hnode_del,
};

static void
cfs_hash_hlist_setup(struct cfs_hash *hs)
{
        if (cfs_hash_with_add_tail(hs)) {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_dd_hops : &cfs_hash_dh_hops;
        } else {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_hd_hops : &cfs_hash_hh_hops;
        }
}

static void
cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
                     unsigned int bits, const void *key, struct cfs_hash_bd *bd)
{
        unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);

        LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);

        bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
        bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
}
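
/*
 * Worked example of the decomposition above (explanatory, not in the
 * original): with bits = 10 and hs_bkt_bits = 3 there are 2^10 hlist
 * heads spread over 2^(10-3) = 128 buckets of 2^3 = 8 heads each. For a
 * hash index of, say, 677, the low 7 bits select the bucket
 * (677 & 127 = 37) and the remaining high bits select the hlist head
 * inside it (677 >> 7 = 5).
 */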

void
cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (likely(hs->hs_rehash_buckets == NULL)) {
                cfs_hash_bd_from_key(hs, hs->hs_buckets,
                                     hs->hs_cur_bits, key, bd);
        } else {
                LASSERT(hs->hs_rehash_bits != 0);
                cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                                     hs->hs_rehash_bits, key, bd);
        }
}
EXPORT_SYMBOL(cfs_hash_bd_get);

static inline void
cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
{
        if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
                return;

        bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
        if (likely(warn_on_depth == 0 ||
                   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
                return;

        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_max  = dep_cur;
        hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
        hs->hs_dep_off  = bd->bd_offset;
        hs->hs_dep_bits = hs->hs_cur_bits;
        spin_unlock(&hs->hs_dep_lock);

        cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
# endif
}

void
cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        struct hlist_node *hnode)
{
        int rc;

        rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
        cfs_hash_bd_dep_record(hs, bd, rc);
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;
        bd->bd_bucket->hsb_count++;

        if (cfs_hash_with_counter(hs))
                atomic_inc(&hs->hs_count);
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_get(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_add_locked);

void
cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                       struct hlist_node *hnode)
{
        hs->hs_hops->hop_hnode_del(hs, bd, hnode);

        LASSERT(bd->bd_bucket->hsb_count > 0);
        bd->bd_bucket->hsb_count--;
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;

        if (cfs_hash_with_counter(hs)) {
                LASSERT(atomic_read(&hs->hs_count) > 0);
                atomic_dec(&hs->hs_count);
        }
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_put_locked(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_del_locked);

void
cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
                        struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
{
        struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
        struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
        int                rc;

        if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
                return;

        /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
         * in cfs_hash_bd_del/add_locked */
        hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
        rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
        cfs_hash_bd_dep_record(hs, bd_new, rc);

        LASSERT(obkt->hsb_count > 0);
        obkt->hsb_count--;
        obkt->hsb_version++;
        if (unlikely(obkt->hsb_version == 0))
                obkt->hsb_version++;
        nbkt->hsb_count++;
        nbkt->hsb_version++;
        if (unlikely(nbkt->hsb_version == 0))
                nbkt->hsb_version++;
}

enum {
        /** always set, for sanity (avoid ZERO intent) */
        CFS_HS_LOOKUP_MASK_FIND     = 1 << 0,
        /** return entry with a ref */
        CFS_HS_LOOKUP_MASK_REF      = 1 << 1,
        /** add entry if not existing */
        CFS_HS_LOOKUP_MASK_ADD      = 1 << 2,
        /** delete entry, ignore other masks */
        CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
};

enum cfs_hash_lookup_intent {
        /** return item w/o refcount */
        CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
        /** return item with refcount */
        CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_REF),
        /** return item w/o refcount if existed, otherwise add */
        CFS_HS_LOOKUP_IT_ADD        = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** return item with refcount if existed, otherwise add */
        CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** delete if existed */
        CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_DEL)
};
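
/*
 * Intents are just mask compositions (explanatory note): for example
 * CFS_HS_LOOKUP_IT_FINDADD expands to FIND | REF | ADD, which is also
 * why cfs_hash_multi_bd_findadd_locked() below can build a peek-or-ref
 * intent at runtime as
 * CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF).
 */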

static struct hlist_node *
cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key, struct hlist_node *hnode,
                          enum cfs_hash_lookup_intent intent)

{
        struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
        struct hlist_node  *ehnode;
        struct hlist_node  *match;
        int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;

        /* with this function, we can avoid a lot of useless refcount ops,
         * which are expensive atomic operations most of the time. */
        match = intent_add ? NULL : hnode;
        hlist_for_each(ehnode, hhead) {
                if (!cfs_hash_keycmp(hs, key, ehnode))
                        continue;

                if (match != NULL && match != ehnode) /* can't match */
                        continue;

                /* match and ... */
                if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
                        cfs_hash_bd_del_locked(hs, bd, ehnode);
                        return ehnode;
                }

                /* caller wants refcount? */
                if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
                        cfs_hash_get(hs, ehnode);
                return ehnode;
        }
        /* no matching item */
        if (!intent_add)
                return NULL;

        LASSERT(hnode != NULL);
        cfs_hash_bd_add_locked(hs, bd, hnode);
        return hnode;
}

struct hlist_node *
cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                        CFS_HS_LOOKUP_IT_FIND);
}
EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);

struct hlist_node *
cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                        CFS_HS_LOOKUP_IT_PEEK);
}
EXPORT_SYMBOL(cfs_hash_bd_peek_locked);

static void
cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                       unsigned n, int excl)
{
        struct cfs_hash_bucket *prev = NULL;
        int                i;

        /**
         * bds must be sorted in ascending order of bd->bd_bucket->hsb_index.
         * NB: it's possible that several bds point to the same bucket but
         * have different bd::bd_offset, so we need to take care to avoid
         * deadlock.
         */
        cfs_hash_for_each_bd(bds, n, i) {
                if (prev == bds[i].bd_bucket)
                        continue;

                LASSERT(prev == NULL ||
                        prev->hsb_index < bds[i].bd_bucket->hsb_index);
                cfs_hash_bd_lock(hs, &bds[i], excl);
                prev = bds[i].bd_bucket;
        }
}

static void
cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                         unsigned n, int excl)
{
        struct cfs_hash_bucket *prev = NULL;
        int                i;

        cfs_hash_for_each_bd(bds, n, i) {
                if (prev != bds[i].bd_bucket) {
                        cfs_hash_bd_unlock(hs, &bds[i], excl);
                        prev = bds[i].bd_bucket;
                }
        }
}

static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                unsigned n, const void *key)
{
        struct hlist_node *ehnode;
        unsigned          i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
                                                        CFS_HS_LOOKUP_IT_FIND);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        int               intent;
        unsigned          i;

        LASSERT(hnode != NULL);
        intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
                                                   NULL, intent);
                if (ehnode != NULL)
                        return ehnode;
        }

        if (i == 1) { /* only one bucket */
                cfs_hash_bd_add_locked(hs, &bds[0], hnode);
        } else {
                struct cfs_hash_bd      mybd;

                cfs_hash_bd_get(hs, key, &mybd);
                cfs_hash_bd_add_locked(hs, &mybd, hnode);
        }

        return hnode;
}

static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode)
{
        struct hlist_node *ehnode;
        unsigned           i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
                                                   CFS_HS_LOOKUP_IT_FINDDEL);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static void
cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
{
        int     rc;

        if (bd2->bd_bucket == NULL)
                return;

        if (bd1->bd_bucket == NULL) {
                *bd1 = *bd2;
                bd2->bd_bucket = NULL;
                return;
        }

        rc = cfs_hash_bd_compare(bd1, bd2);
        if (rc == 0) {
                bd2->bd_bucket = NULL;

        } else if (rc > 0) { /* swap bd1 and bd2 */
                struct cfs_hash_bd tmp;

                tmp = *bd2;
                *bd2 = *bd1;
                *bd1 = tmp;
        }
}

void
cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
                     struct cfs_hash_bd *bds)
{
        /* NB: caller should hold hs_lock.rw if REHASH is set */
        cfs_hash_bd_from_key(hs, hs->hs_buckets,
                             hs->hs_cur_bits, key, &bds[0]);
        if (likely(hs->hs_rehash_buckets == NULL)) {
                /* no rehash or not rehashing */
                bds[1].bd_bucket = NULL;
                return;
        }

        LASSERT(hs->hs_rehash_bits != 0);
        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                             hs->hs_rehash_bits, key, &bds[1]);

        cfs_hash_bd_order(&bds[0], &bds[1]);
}

void
cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
        cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}

void
cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
        cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}

struct hlist_node *
cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                               const void *key)
{
        return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
}

struct hlist_node *
cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode,
                                int noref)
{
        return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
                                                hnode, noref);
}

struct hlist_node *
cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode)
{
        return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}

static void
cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
                      int bkt_size, int prev_size, int size)
{
        int     i;

        for (i = prev_size; i < size; i++) {
                if (buckets[i] != NULL)
                        LIBCFS_FREE(buckets[i], bkt_size);
        }

        LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
}

/*
 * Create or grow bucket memory. Return old_buckets if no allocation was
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static struct cfs_hash_bucket **
cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
                         unsigned int old_size, unsigned int new_size)
{
        struct cfs_hash_bucket **new_bkts;
        int                 i;

        LASSERT(old_size == 0 || old_bkts != NULL);

        if (old_bkts != NULL && old_size == new_size)
                return old_bkts;

        LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
        if (new_bkts == NULL)
                return NULL;

        if (old_bkts != NULL) {
                memcpy(new_bkts, old_bkts,
                       min(old_size, new_size) * sizeof(*old_bkts));
        }

        for (i = old_size; i < new_size; i++) {
                struct hlist_head *hhead;
                struct cfs_hash_bd     bd;

                LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
                if (new_bkts[i] == NULL) {
                        cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
                                              old_size, new_size);
                        return NULL;
                }

                new_bkts[i]->hsb_index   = i;
                new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
                new_bkts[i]->hsb_depmax  = -1; /* unknown */
                bd.bd_bucket = new_bkts[i];
                cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
                        INIT_HLIST_HEAD(hhead);

                if (cfs_hash_with_no_lock(hs) ||
                    cfs_hash_with_no_bktlock(hs))
                        continue;

                if (cfs_hash_with_rw_bktlock(hs))
                        rwlock_init(&new_bkts[i]->hsb_lock.rw);
                else if (cfs_hash_with_spin_bktlock(hs))
                        spin_lock_init(&new_bkts[i]->hsb_lock.spin);
                else
                        LBUG(); /* invalid use-case */
        }
        return new_bkts;
}

/**
 * Initialize new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enables dynamic hash resizing
 *           - CFS_HASH_SORT enables chained hash sort
 */
static int cfs_hash_rehash_worker(struct cfs_workitem *wi);

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(struct cfs_workitem *wi)
{
        struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
        int         dep;
        int         bkt;
        int         off;
        int         bits;

        spin_lock(&hs->hs_dep_lock);
        dep  = hs->hs_dep_max;
        bkt  = hs->hs_dep_bkt;
        off  = hs->hs_dep_off;
        bits = hs->hs_dep_bits;
        spin_unlock(&hs->hs_dep_lock);

        LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
                      hs->hs_name, bits, dep, bkt, off);
        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_bits = 0; /* mark as workitem done */
        spin_unlock(&hs->hs_dep_lock);
        return 0;
}

static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
{
        spin_lock_init(&hs->hs_dep_lock);
        cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
}

static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
{
        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
                return;

        spin_lock(&hs->hs_dep_lock);
        while (hs->hs_dep_bits != 0) {
                spin_unlock(&hs->hs_dep_lock);
                cond_resched();
                spin_lock(&hs->hs_dep_lock);
        }
        spin_unlock(&hs->hs_dep_lock);
}

#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */

static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}

#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */

struct cfs_hash *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                unsigned bkt_bits, unsigned extra_bytes,
                unsigned min_theta, unsigned max_theta,
                struct cfs_hash_ops *ops, unsigned flags)
{
        struct cfs_hash *hs;
        int         len;

        ENTRY;

        CLASSERT(CFS_HASH_THETA_BITS < 15);

        LASSERT(name != NULL);
        LASSERT(ops != NULL);
        LASSERT(ops->hs_key);
        LASSERT(ops->hs_hash);
        LASSERT(ops->hs_object);
        LASSERT(ops->hs_keycmp);
        LASSERT(ops->hs_get != NULL);
        LASSERT(ops->hs_put != NULL || ops->hs_put_locked != NULL);

        if ((flags & CFS_HASH_REHASH) != 0)
                flags |= CFS_HASH_COUNTER; /* must have counter */

        LASSERT(cur_bits > 0);
        LASSERT(cur_bits >= bkt_bits);
        LASSERT(max_bits >= cur_bits && max_bits < 31);
        LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
        LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
                     (flags & CFS_HASH_NO_LOCK) == 0));
        LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
                      ops->hs_keycpy != NULL));

        len = (flags & CFS_HASH_BIGNAME) == 0 ?
              CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
        LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
        if (hs == NULL)
                RETURN(NULL);

        strlcpy(hs->hs_name, name, len);
        hs->hs_flags = flags;

        atomic_set(&hs->hs_refcount, 1);
        atomic_set(&hs->hs_count, 0);

        cfs_hash_lock_setup(hs);
        cfs_hash_hlist_setup(hs);

        hs->hs_cur_bits = (__u8)cur_bits;
        hs->hs_min_bits = (__u8)cur_bits;
        hs->hs_max_bits = (__u8)max_bits;
        hs->hs_bkt_bits = (__u8)bkt_bits;

        hs->hs_ops         = ops;
        hs->hs_extra_bytes = extra_bytes;
        hs->hs_rehash_bits = 0;
        cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
        cfs_hash_depth_wi_init(hs);

        if (cfs_hash_with_rehash(hs))
                __cfs_hash_set_theta(hs, min_theta, max_theta);

        hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
                                                  CFS_HASH_NBKT(hs));
        if (hs->hs_buckets != NULL)
                return hs;

        LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
        RETURN(NULL);
}
EXPORT_SYMBOL(cfs_hash_create);
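
/*
 * Minimal usage sketch for cfs_hash_create() (illustrative only; struct
 * my_obj and the my_* helpers are hypothetical, and the exact
 * cfs_hash_ops member prototypes should be checked against
 * libcfs_hash.h):
 *
 *      struct my_obj {
 *              __u64                   mo_key;
 *              struct hlist_node       mo_node;
 *              atomic_t                mo_ref;
 *      };
 *
 *      static struct cfs_hash_ops my_hash_ops = {
 *              .hs_hash       = my_obj_hash,      // hash @key into [0, mask]
 *              .hs_key        = my_obj_key,       // hnode -> &obj->mo_key
 *              .hs_keycmp     = my_obj_keycmp,    // compare @key with hnode
 *              .hs_object     = my_obj_object,    // hnode -> containing obj
 *              .hs_get        = my_obj_get,       // take a ref on the object
 *              .hs_put_locked = my_obj_put_locked, // drop a ref, can't sleep
 *      };
 *
 *      hs = cfs_hash_create("my-hash", 5, 10, 3, 0,
 *                           CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *                           &my_hash_ops, CFS_HASH_DEFAULT);
 *
 * i.e. start with 2^5 hlist heads, allow growth up to 2^10, with 2^3
 * heads per bucket, so there are 2^(5-3) = 4 independently locked
 * buckets initially.
 */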

/**
 * Cleanup libcfs hash @hs.
 */
static void
cfs_hash_destroy(struct cfs_hash *hs)
{
        struct hlist_node     *hnode;
        struct hlist_node     *pos;
        struct cfs_hash_bd         bd;
        int                   i;
        ENTRY;

        LASSERT(hs != NULL);
        LASSERT(!cfs_hash_is_exiting(hs) &&
                !cfs_hash_is_iterating(hs));

        /**
         * prohibit further rehashes; no lock is needed because
         * I'm the only (last) one who can change it.
         */
        hs->hs_exiting = 1;
        if (cfs_hash_with_rehash(hs))
                cfs_hash_rehash_cancel(hs);

        cfs_hash_depth_wi_cancel(hs);
        /* rehash should be done/canceled */
        LASSERT(hs->hs_buckets != NULL &&
                hs->hs_rehash_buckets == NULL);

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                LASSERT(bd.bd_bucket != NULL);
                /* no need to take this lock, just for consistent code */
                cfs_hash_bd_lock(hs, &bd, 1);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                LASSERTF(!cfs_hash_with_assert_empty(hs),
                                         "hash %s bucket %u(%u) is not "
                                         "empty: %u items left\n",
                                         hs->hs_name, bd.bd_bucket->hsb_index,
                                         bd.bd_offset, bd.bd_bucket->hsb_count);
                                /* can't validate the key here, because we
                                 * may be interrupting a rehash */
                                cfs_hash_bd_del_locked(hs, &bd, hnode);
                                cfs_hash_exit(hs, hnode);
                        }
                }
                LASSERT(bd.bd_bucket->hsb_count == 0);
                cfs_hash_bd_unlock(hs, &bd, 1);
                cond_resched();
        }

        LASSERT(atomic_read(&hs->hs_count) == 0);

        cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
                              0, CFS_HASH_NBKT(hs));
        i = cfs_hash_with_bigname(hs) ?
            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
        LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));

        EXIT;
}

struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
{
        if (atomic_inc_not_zero(&hs->hs_refcount))
                return hs;
        return NULL;
}
EXPORT_SYMBOL(cfs_hash_getref);

void cfs_hash_putref(struct cfs_hash *hs)
{
        if (atomic_dec_and_test(&hs->hs_refcount))
                cfs_hash_destroy(hs);
}
EXPORT_SYMBOL(cfs_hash_putref);

static inline int
cfs_hash_rehash_bits(struct cfs_hash *hs)
{
        if (cfs_hash_with_no_lock(hs) ||
            !cfs_hash_with_rehash(hs))
                return -EOPNOTSUPP;

        if (unlikely(cfs_hash_is_exiting(hs)))
                return -ESRCH;

        if (unlikely(cfs_hash_is_rehashing(hs)))
                return -EALREADY;

        if (unlikely(cfs_hash_is_iterating(hs)))
                return -EAGAIN;

        /* XXX: need to handle case with max_theta != 2.0
         *      and the case with min_theta != 0.5 */
        if ((hs->hs_cur_bits < hs->hs_max_bits) &&
            (__cfs_hash_theta(hs) > hs->hs_max_theta))
                return hs->hs_cur_bits + 1;

        if (!cfs_hash_with_shrink(hs))
                return 0;

        if ((hs->hs_cur_bits > hs->hs_min_bits) &&
            (__cfs_hash_theta(hs) < hs->hs_min_theta))
                return hs->hs_cur_bits - 1;

        return 0;
}
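
/*
 * Note on theta (explanatory, based on a reading of __cfs_hash_theta()
 * in libcfs_hash.h): theta is the average number of items per hlist
 * head, kept as a fixed-point value with CFS_HASH_THETA_BITS fractional
 * bits, i.e. theta = (hs_count << CFS_HASH_THETA_BITS) >> hs_cur_bits.
 * For example, with hs_cur_bits = 10 (1024 hlist heads) and 4096 items,
 * theta encodes 4.0; if max_theta encodes 2.0, cfs_hash_rehash_bits()
 * above asks to grow by one bit, which halves theta.
 */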

/**
 * don't allow inline rehash if:
 * - user wants non-blocking change (add/del) on hash table
 * - too many elements
 */
static inline int
cfs_hash_rehash_inline(struct cfs_hash *hs)
{
        return !cfs_hash_with_nblk_change(hs) &&
               atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called when the item is added.
 */
void
cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
        struct cfs_hash_bd   bd;
        int             bits;

        LASSERT(hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_bd_get_and_lock(hs, key, &bd, 1);

        cfs_hash_key_validate(hs, key, hnode);
        cfs_hash_bd_add_locked(hs, &bd, hnode);

        cfs_hash_bd_unlock(hs, &bd, 1);

        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
}
EXPORT_SYMBOL(cfs_hash_add);

static struct hlist_node *
cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
                     struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        struct cfs_hash_bd     bds[2];
        int               bits = 0;

        LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode);

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        cfs_hash_key_validate(hs, key, hnode);
        ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
                                                 hnode, noref);
        cfs_hash_dual_bd_unlock(hs, bds, 1);

        if (ehnode == hnode) /* new item added */
                bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return ehnode;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
                    struct hlist_node *hnode)
{
        return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
               -EALREADY : 0;
}
EXPORT_SYMBOL(cfs_hash_add_unique);

/**
 * Add item @hnode to libcfs hash @hs using @key.  If this @key
 * already exists in the hash then ops->hs_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->hs_get is called on the item which was added.
 */
void *
cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
                        struct hlist_node *hnode)
{
        hnode = cfs_hash_find_or_add(hs, key, hnode, 0);

        return cfs_hash_object(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_findadd_unique);
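
/*
 * Typical use of cfs_hash_add_unique() (illustrative sketch; obj and
 * my_obj_free are hypothetical and assume the ops wired up in the
 * cfs_hash_create() sketch above):
 *
 *      rc = cfs_hash_add_unique(hs, &obj->mo_key, &obj->mo_node);
 *      if (rc == -EALREADY) {
 *              // an entry with this key already existed; @obj was
 *              // not added and no reference was taken on it
 *              my_obj_free(obj);
 *      }
 */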

/**
 * Delete item @hnode from the libcfs hash @hs using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
void *
cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
        void           *obj  = NULL;
        int             bits = 0;
        struct cfs_hash_bd   bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        /* NB: do nothing if @hnode is not in hash table */
        if (hnode == NULL || !hlist_unhashed(hnode)) {
                if (bds[1].bd_bucket == NULL && hnode != NULL) {
                        cfs_hash_bd_del_locked(hs, &bds[0], hnode);
                } else {
                        hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
                                                                key, hnode);
                }
        }

        if (hnode != NULL) {
                obj  = cfs_hash_object(hs, hnode);
                bits = cfs_hash_rehash_bits(hs);
        }

        cfs_hash_dual_bd_unlock(hs, bds, 1);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return obj;
}
EXPORT_SYMBOL(cfs_hash_del);

/**
 * Delete the item given @key in the libcfs hash @hs.  The first @key
 * found in the hash will be removed; if the key exists multiple times
 * in the hash @hs, this function must be called once per key.  The
 * removed object will be returned and ops->hs_put is called on the
 * removed object.
 */
void *
cfs_hash_del_key(struct cfs_hash *hs, const void *key)
{
        return cfs_hash_del(hs, key, NULL);
}
EXPORT_SYMBOL(cfs_hash_del_key);

/**
 * Lookup an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash, hs->hs_get() is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object.  If the @key was not found
 * in the hash @hs, NULL is returned.
 */
void *
cfs_hash_lookup(struct cfs_hash *hs, const void *key)
{
        void                 *obj = NULL;
        struct hlist_node     *hnode;
        struct cfs_hash_bd         bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

        hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
        if (hnode != NULL)
                obj = cfs_hash_object(hs, hnode);

        cfs_hash_dual_bd_unlock(hs, bds, 0);
        cfs_hash_unlock(hs, 0);

        return obj;
}
EXPORT_SYMBOL(cfs_hash_lookup);
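
/*
 * Lookup/put pairing (illustrative sketch; my_obj is the hypothetical
 * type from the cfs_hash_create() sketch above):
 *
 *      struct my_obj *obj = cfs_hash_lookup(hs, &key);
 *
 *      if (obj != NULL) {
 *              // ops->hs_get was called by the lookup, so the ref
 *              // must be dropped with cfs_hash_put() when done
 *              my_obj_use(obj);
 *              cfs_hash_put(hs, &obj->mo_node);
 *      }
 */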

static void
cfs_hash_for_each_enter(struct cfs_hash *hs)
{
        LASSERT(!cfs_hash_is_exiting(hs));

        if (!cfs_hash_with_rehash(hs))
                return;
        /*
         * NB: there's a race on cfs_hash::hs_iterating, but it doesn't
         * matter because it's just an unreliable signal to the
         * rehash-thread, which will try to finish the rehash ASAP when
         * it sees this.
         */
        hs->hs_iterating = 1;

        cfs_hash_lock(hs, 1);
        hs->hs_iterators++;

        /* NB: iteration is mostly called by service threads; instead of
         * blocking a service thread we tend to cancel the pending
         * rehash-request and relaunch it after the iteration */
        if (cfs_hash_is_rehashing(hs))
                cfs_hash_rehash_cancel_locked(hs);
        cfs_hash_unlock(hs, 1);
}

static void
cfs_hash_for_each_exit(struct cfs_hash *hs)
{
        int remained;
        int bits;

        if (!cfs_hash_with_rehash(hs))
                return;
        cfs_hash_lock(hs, 1);
        remained = --hs->hs_iterators;
        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 1);
        /* NB: there's a race on cfs_hash::hs_iterating, see above */
        if (remained == 0)
                hs->hs_iterating = 0;
        if (bits > 0) {
                cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
                                    CFS_HASH_LOOP_HOG);
        }
}

/**
 * For each item in the libcfs hash @hs, call the passed callback @func
 * with the hash item and the private @data as arguments.
 *
 * a) the function may sleep!
 * b) during the callback:
 *    . the bucket lock is held, so the callback must never sleep.
 *    . if @remove_safe is true, the user can remove the current item
 *      with cfs_hash_bd_del_locked
 */
static __u64
cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
                        void *data, int remove_safe)
{
        struct hlist_node       *hnode;
        struct hlist_node       *pos;
        struct cfs_hash_bd      bd;
        __u64                   count = 0;
        int                     excl  = !!remove_safe;
        int                     loop  = 0;
        int                     i;
        ENTRY;

        cfs_hash_for_each_enter(hs);

        cfs_hash_lock(hs, 0);
        LASSERT(!cfs_hash_is_rehashing(hs));

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                cfs_hash_bd_lock(hs, &bd, excl);
                if (func == NULL) { /* only glimpse size */
                        count += bd.bd_bucket->hsb_count;
                        cfs_hash_bd_unlock(hs, &bd, excl);
                        continue;
                }

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                cfs_hash_bucket_validate(hs, &bd, hnode);
                                count++;
                                loop++;
                                if (func(hs, &bd, hnode, data)) {
                                        cfs_hash_bd_unlock(hs, &bd, excl);
                                        goto out;
                                }
                        }
                }
                cfs_hash_bd_unlock(hs, &bd, excl);
                if (loop < CFS_HASH_LOOP_HOG)
                        continue;
                loop = 0;
                cfs_hash_unlock(hs, 0);
                cond_resched();
                cfs_hash_lock(hs, 0);
        }
 out:
        cfs_hash_unlock(hs, 0);

        cfs_hash_for_each_exit(hs);
        RETURN(count);
}

struct cfs_hash_cond_arg {
        cfs_hash_cond_opt_cb_t  func;
        void                   *arg;
};

static int
cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode, void *data)
{
        struct cfs_hash_cond_arg *cond = data;

        if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
                cfs_hash_bd_del_locked(hs, bd, hnode);
        return 0;
}

/**
 * Delete items from the libcfs hash @hs for which @func returns true.
 * The write lock is held while looping over each bucket, to prevent
 * any object from being referenced during the deletion.
 */
void
cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
{
        struct cfs_hash_cond_arg arg = {
                .func   = func,
                .arg    = data,
        };

        cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
}
EXPORT_SYMBOL(cfs_hash_cond_del);

void
cfs_hash_for_each(struct cfs_hash *hs,
                  cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each);

void
cfs_hash_for_each_safe(struct cfs_hash *hs,
                       cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 1);
}
EXPORT_SYMBOL(cfs_hash_for_each_safe);
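
/*
 * Example iteration callback (illustrative sketch; my_obj is the
 * hypothetical type from the cfs_hash_create() sketch above).
 * Callbacks here run under the bucket lock, so they must not sleep,
 * and returning nonzero stops the iteration:
 *
 *      static int my_count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *                             struct hlist_node *hnode, void *data)
 *      {
 *              struct my_obj *obj = cfs_hash_object(hs, hnode);
 *              int *nr = data;
 *
 *              if (obj->mo_key != 0)   // hypothetical per-object check
 *                      (*nr)++;
 *              return 0;               // keep going; return 1 to break
 *      }
 *
 *      int nr = 0;
 *      cfs_hash_for_each(hs, my_count_cb, &nr);
 */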

static int
cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
              struct hlist_node *hnode, void *data)
{
        *(int *)data = 0;
        return 1; /* return 1 to break the loop */
}

int
cfs_hash_is_empty(struct cfs_hash *hs)
{
        int empty = 1;

        cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
        return empty;
}
EXPORT_SYMBOL(cfs_hash_is_empty);

__u64
cfs_hash_size_get(struct cfs_hash *hs)
{
        return cfs_hash_with_counter(hs) ?
               atomic_read(&hs->hs_count) :
               cfs_hash_for_each_tight(hs, NULL, NULL, 0);
}
EXPORT_SYMBOL(cfs_hash_size_get);
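
/*
 * Cost note (illustrative sketch, assuming the per-hash counter flag
 * CFS_HASH_COUNTER was set at creation): with the counter,
 * cfs_hash_size_get() is a single atomic_read(); without it, the size
 * is computed by walking every bucket under its lock:
 *
 *      if (cfs_hash_is_empty(my_hash))         // stops at first item
 *              CDEBUG(D_INFO, "empty\n");
 *      else
 *              CDEBUG(D_INFO, "%llu items\n",
 *                     cfs_hash_size_get(my_hash));  // O(1) or O(n)
 */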

/*
 * cfs_hash_for_each_relax:
 * Iterate the hash table and call @func on each item without holding
 * any lock across the callback.  This function cannot guarantee that
 * the iteration will finish if these features are enabled:
 *
 *  a. if rehash_key is enabled, an item can be moved from one bucket
 *     to another
 *  b. the user can remove a non-zero-ref item from the hash table, so
 *     the item can disappear during iteration; even worse, the user
 *     may change its key and insert it into another bucket.
 *
 * There is no way to finish the iteration correctly in either case,
 * so the iteration has to be stopped as soon as a change is detected.
 */
static int
cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
                        void *data, int start)
{
        struct hlist_node       *hnode;
        struct hlist_node       *next = NULL;
        struct cfs_hash_bd      bd;
        __u32                   version;
        int                     count = 0;
        int                     stop_on_change;
        int                     has_put_locked;
        int                     rc = 0;
        int                     i, end = -1;
        ENTRY;

        stop_on_change = cfs_hash_with_rehash_key(hs) ||
                         !cfs_hash_with_no_itemref(hs);
        has_put_locked = hs->hs_ops->hs_put_locked != NULL;
        cfs_hash_lock(hs, 0);
again:
        LASSERT(!cfs_hash_is_rehashing(hs));

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                if (i < start)
                        continue;
                else if (end > 0 && i >= end)
                        break;

                cfs_hash_bd_lock(hs, &bd, 0);
                version = cfs_hash_bd_version_get(&bd);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hnode = hhead->first;
                        if (hnode == NULL)
                                continue;
                        cfs_hash_get(hs, hnode);
                        for (; hnode != NULL; hnode = next) {
                                cfs_hash_bucket_validate(hs, &bd, hnode);
                                next = hnode->next;
                                if (next != NULL)
                                        cfs_hash_get(hs, next);
                                cfs_hash_bd_unlock(hs, &bd, 0);
                                cfs_hash_unlock(hs, 0);

                                rc = func(hs, &bd, hnode, data);
                                if (stop_on_change || !has_put_locked)
                                        cfs_hash_put(hs, hnode);

                                cond_resched();
                                count++;

                                cfs_hash_lock(hs, 0);
                                cfs_hash_bd_lock(hs, &bd, 0);
                                if (stop_on_change) {
                                        if (version !=
                                            cfs_hash_bd_version_get(&bd))
                                                rc = -EINTR;
                                } else if (has_put_locked) {
                                        cfs_hash_put_locked(hs, hnode);
                                }
                                if (rc) /* callback wants to break iteration */
                                        break;
                        }
                        if (next != NULL) {
                                if (has_put_locked) {
                                        cfs_hash_put_locked(hs, next);
                                        next = NULL;
                                }
                                break;
                        } else if (rc != 0) {
                                break;
                        }
                }
                cfs_hash_bd_unlock(hs, &bd, 0);
                if (next != NULL && !has_put_locked) {
                        cfs_hash_put(hs, next);
                        next = NULL;
                }
                if (rc) /* callback wants to break iteration */
                        break;
        }

        if (start > 0 && rc == 0) {
                end = start;
                start = 0;
                goto again;
        }

        cfs_hash_unlock(hs, 0);
        return count;
}

int
cfs_hash_for_each_nolock(struct cfs_hash *hs,
                         cfs_hash_for_each_cb_t func, void *data, int start)
{
        ENTRY;

        if (cfs_hash_with_no_lock(hs) ||
            cfs_hash_with_rehash_key(hs) ||
            !cfs_hash_with_no_itemref(hs))
                RETURN(-EOPNOTSUPP);

        if (hs->hs_ops->hs_get == NULL ||
           (hs->hs_ops->hs_put == NULL &&
            hs->hs_ops->hs_put_locked == NULL))
                RETURN(-EOPNOTSUPP);

        cfs_hash_for_each_enter(hs);
        cfs_hash_for_each_relax(hs, func, data, start);
        cfs_hash_for_each_exit(hs);

        RETURN(0);
}
EXPORT_SYMBOL(cfs_hash_for_each_nolock);
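
/*
 * Usage sketch (illustrative only): cfs_hash_for_each_nolock() drops
 * all locks around the callback, so the callback may sleep.  Per the
 * checks above, the table must not use CFS_HASH_NO_LOCK or rehashable
 * keys, must have been created with CFS_HASH_NO_ITEMREF, and must
 * register hs_get plus hs_put or hs_put_locked; otherwise -EOPNOTSUPP
 * is returned.  "my_obj_flush" is a hypothetical callback:
 *
 *      rc = cfs_hash_for_each_nolock(my_hash, my_obj_flush, NULL, 0);
 *      if (rc != 0)
 *              CDEBUG(D_INFO, "lockless walk unsupported: rc = %d\n", rc);
 */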

/**
 * For each hash bucket in the libcfs hash @hs call the passed callback
 * @func until all the hash buckets are empty.  The passed callback @func
 * or the previously registered callback hs->hs_put must remove the item
 * from the hash.  You may either use the cfs_hash_del() or hlist_del()
 * functions.  No rwlocks will be held during the callback @func, so it
 * is safe to sleep if needed.  This function will not terminate until
 * the hash is empty.  Note it is still possible to concurrently add new
 * items into the hash.  It is the caller's responsibility to ensure
 * the required locking is in place to prevent concurrent insertions.
 */
int
cfs_hash_for_each_empty(struct cfs_hash *hs,
                        cfs_hash_for_each_cb_t func, void *data)
{
        unsigned  i = 0;
        ENTRY;

        if (cfs_hash_with_no_lock(hs))
                return -EOPNOTSUPP;

        if (hs->hs_ops->hs_get == NULL ||
           (hs->hs_ops->hs_put == NULL &&
            hs->hs_ops->hs_put_locked == NULL))
                return -EOPNOTSUPP;

        cfs_hash_for_each_enter(hs);
        while (cfs_hash_for_each_relax(hs, func, data, 0)) {
                CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
                       hs->hs_name, i++);
        }
        cfs_hash_for_each_exit(hs);
        RETURN(0);
}
EXPORT_SYMBOL(cfs_hash_for_each_empty);
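
/*
 * Teardown sketch (illustrative only): a callback for
 * cfs_hash_for_each_empty() must actually unhash each item, or the
 * loop above never terminates.  "struct my_obj", "mo_key" and
 * "my_obj_destroy" are hypothetical, and the cfs_hash_del() call is
 * assumed to take (hash, key, hnode) as elsewhere in libcfs:
 *
 *      static int my_obj_free(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *                             struct hlist_node *hnode, void *data)
 *      {
 *              struct my_obj *mo = cfs_hash_object(hs, hnode);
 *
 *              cfs_hash_del(hs, &mo->mo_key, hnode);   // unhash the item
 *              my_obj_destroy(mo);                     // sleeping is fine
 *              return 0;
 *      }
 *
 *      cfs_hash_for_each_empty(my_hash, my_obj_free, NULL);
 */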

void
cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
                        cfs_hash_for_each_cb_t func, void *data)
{
        struct hlist_head *hhead;
        struct hlist_node *hnode;
        struct cfs_hash_bd bd;

        cfs_hash_for_each_enter(hs);
        cfs_hash_lock(hs, 0);
        if (hindex >= CFS_HASH_NHLIST(hs))
                goto out;

        cfs_hash_bd_index_set(hs, hindex, &bd);

        cfs_hash_bd_lock(hs, &bd, 0);
        hhead = cfs_hash_bd_hhead(hs, &bd);
        hlist_for_each(hnode, hhead) {
                if (func(hs, &bd, hnode, data))
                        break;
        }
        cfs_hash_bd_unlock(hs, &bd, 0);
out:
        cfs_hash_unlock(hs, 0);
        cfs_hash_for_each_exit(hs);
}
EXPORT_SYMBOL(cfs_hash_hlist_for_each);
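
/*
 * Sketch (illustrative only): walk a single hash chain, e.g. to sample
 * chain depth.  @hindex must be below CFS_HASH_NHLIST(hs); out-of-range
 * indices are silently ignored.  "count_cb" is hypothetical:
 *
 *      static int count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *                          struct hlist_node *hnode, void *data)
 *      {
 *              (*(int *)data)++;
 *              return 0;
 *      }
 *
 *      int depth = 0;
 *      cfs_hash_hlist_for_each(my_hash, 0, count_cb, &depth);
 */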

/*
 * For each item in the libcfs hash @hs which matches the @key call
 * the passed callback @func and pass to it as an argument each hash
 * item and the private @data.  During the callback the bucket lock
 * is held so the callback must never sleep.
 */
void
cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
                      cfs_hash_for_each_cb_t func, void *data)
{
        struct hlist_node *hnode;
        struct cfs_hash_bd bds[2];
        unsigned           i;

        cfs_hash_lock(hs, 0);

        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

        cfs_hash_for_each_bd(bds, 2, i) {
                struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);

                hlist_for_each(hnode, hlist) {
                        cfs_hash_bucket_validate(hs, &bds[i], hnode);

                        if (cfs_hash_keycmp(hs, key, hnode)) {
                                if (func(hs, &bds[i], hnode, data))
                                        break;
                        }
                }
        }

        cfs_hash_dual_bd_unlock(hs, bds, 0);
        cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each_key);
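
/*
 * Sketch (illustrative only): visit every item that hashes to, and
 * compares equal to, @key -- useful for tables that allow duplicate
 * keys.  The bucket lock is held, so the callback must be atomic;
 * returning non-zero stops the scan.  "match_cb" and "my_obj_check"
 * are hypothetical:
 *
 *      static int match_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *                          struct hlist_node *hnode, void *data)
 *      {
 *              struct my_obj *mo = cfs_hash_object(hs, hnode);
 *
 *              return my_obj_check(mo, data);  // non-sleeping work only
 *      }
 *
 *      cfs_hash_for_each_key(my_hash, &key, match_cb, NULL);
 */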

/**
 * Rehash the libcfs hash @hs to the given @bits.  This can be used
 * to grow the hash size when excessive chaining is detected, or to
 * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
 * flag is set in @hs the libcfs hash may be dynamically rehashed
 * during addition or removal if the hash's theta value drops below
 * hs->hs_min_theta or exceeds hs->hs_max_theta.  By default
 * these values are tuned to keep the chained hash depth small, and
 * this approach assumes a reasonably uniform hashing function.  The
 * theta thresholds for @hs are tunable via cfs_hash_set_theta().
 */
void
cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
{
        int i;

        /* caller must hold cfs_hash_lock(hs, 1) */
        LASSERT(cfs_hash_with_rehash(hs) &&
                !cfs_hash_with_no_lock(hs));

        if (!cfs_hash_is_rehashing(hs))
                return;

        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
                hs->hs_rehash_bits = 0;
                return;
        }

        for (i = 2; cfs_hash_is_rehashing(hs); i++) {
                cfs_hash_unlock(hs, 1);
                /* raise console warning while waiting too long */
                CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
                       "hash %s is still rehashing, rescheduled %d\n",
                       hs->hs_name, i - 1);
                cond_resched();
                cfs_hash_lock(hs, 1);
        }
}

void
cfs_hash_rehash_cancel(struct cfs_hash *hs)
{
        cfs_hash_lock(hs, 1);
        cfs_hash_rehash_cancel_locked(hs);
        cfs_hash_unlock(hs, 1);
}

int
cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
{
        int     rc;

        LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));

        cfs_hash_lock(hs, 1);

        rc = cfs_hash_rehash_bits(hs);
        if (rc <= 0) {
                cfs_hash_unlock(hs, 1);
                return rc;
        }

        hs->hs_rehash_bits = rc;
        if (!do_rehash) {
                /* launch and return */
                cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
                cfs_hash_unlock(hs, 1);
                return 0;
        }

        /* rehash right now */
        cfs_hash_unlock(hs, 1);

        return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
}
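
/*
 * Sketch (illustrative only): trigger a resize check by hand.
 * cfs_hash_rehash_bits() decides whether the current theta justifies
 * a resize; a return of <= 0 means nothing to do (or an error code):
 *
 *      rc = cfs_hash_rehash(my_hash, 1);       // rehash synchronously
 *      rc = cfs_hash_rehash(my_hash, 0);       // queue the workitem
 */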

static int
cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
{
        struct cfs_hash_bd new;
        struct hlist_head *hhead;
        struct hlist_node *hnode;
        struct hlist_node *pos;
        void              *key;
        int                c = 0;

        /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
        cfs_hash_bd_for_each_hlist(hs, old, hhead) {
                hlist_for_each_safe(hnode, pos, hhead) {
                        key = cfs_hash_key(hs, hnode);
                        LASSERT(key != NULL);
                        /* Validate hnode is in the correct bucket. */
                        cfs_hash_bucket_validate(hs, old, hnode);
                        /*
                         * Delete from old hash bucket; move to new bucket.
                         * ops->hs_key must be defined.
                         */
                        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                                             hs->hs_rehash_bits, key, &new);
                        cfs_hash_bd_move_locked(hs, old, &new, hnode);
                        c++;
                }
        }
        return c;
}

static int
cfs_hash_rehash_worker(struct cfs_workitem *wi)
{
        struct cfs_hash         *hs =
                container_of(wi, struct cfs_hash, hs_rehash_wi);
        struct cfs_hash_bucket **bkts;
        struct cfs_hash_bd      bd;
        unsigned int            old_size;
        unsigned int            new_size;
        int                     bsize;
        int                     count = 0;
        int                     rc = 0;
        int                     i;

        LASSERT(hs != NULL && cfs_hash_with_rehash(hs));

        cfs_hash_lock(hs, 0);
        LASSERT(cfs_hash_is_rehashing(hs));

        old_size = CFS_HASH_NBKT(hs);
        new_size = CFS_HASH_RH_NBKT(hs);

        cfs_hash_unlock(hs, 0);

        /*
         * don't need hs::hs_rwlock for hs::hs_buckets,
         * because nobody else can change the bkt-table.
         */
        bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
                                        old_size, new_size);
        cfs_hash_lock(hs, 1);
        if (bkts == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        if (bkts == hs->hs_buckets) {
                bkts = NULL; /* do nothing */
                goto out;
        }

        rc = __cfs_hash_theta(hs);
        if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
                /* free the newly allocated bkt-table */
                old_size = new_size;
                new_size = CFS_HASH_NBKT(hs);
                rc = -EALREADY;
                goto out;
        }

        LASSERT(hs->hs_rehash_buckets == NULL);
        hs->hs_rehash_buckets = bkts;

        rc = 0;
        cfs_hash_for_each_bucket(hs, &bd, i) {
                if (cfs_hash_is_exiting(hs)) {
                        rc = -ESRCH;
                        /* someone wants to destroy the hash, abort now */
                        if (old_size < new_size) /* OK to free old bkt-table */
                                break;
                        /* it's shrinking, need to free the new bkt-table */
                        hs->hs_rehash_buckets = NULL;
                        old_size = new_size;
                        new_size = CFS_HASH_NBKT(hs);
                        goto out;
                }

                count += cfs_hash_rehash_bd(hs, &bd);
                if (count < CFS_HASH_LOOP_HOG ||
                    cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
                        continue;
                }

                count = 0;
                cfs_hash_unlock(hs, 1);
                cond_resched();
                cfs_hash_lock(hs, 1);
        }

        hs->hs_rehash_count++;

        bkts = hs->hs_buckets;
        hs->hs_buckets = hs->hs_rehash_buckets;
        hs->hs_rehash_buckets = NULL;

        hs->hs_cur_bits = hs->hs_rehash_bits;
out:
        hs->hs_rehash_bits = 0;
        if (rc == -ESRCH) /* will never be scheduled again */
                cfs_wi_exit(cfs_sched_rehash, wi);
        bsize = cfs_hash_bkt_size(hs);
        cfs_hash_unlock(hs, 1);
        /* can't refer to @hs anymore because it could be destroyed */
        if (bkts != NULL)
                cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
        if (rc != 0)
                CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
        /* return 1 only if cfs_wi_exit is called */
        return rc == -ESRCH;
}

/**
 * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
 * @old_key must be provided to locate the object's previous location
 * in the hash, and the @new_key will be used to reinsert the object.
 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
 * combo when it is critical that there is no window in time where the
 * object is missing from the hash.  When an object is being rehashed
 * the registered cfs_hash_get() and cfs_hash_put() functions will
 * not be called.
 */
void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
                         void *new_key, struct hlist_node *hnode)
{
        struct cfs_hash_bd bds[3];
        struct cfs_hash_bd old_bds[2];
        struct cfs_hash_bd new_bd;

        LASSERT(!hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);

        cfs_hash_dual_bd_get(hs, old_key, old_bds);
        cfs_hash_bd_get(hs, new_key, &new_bd);

        bds[0] = old_bds[0];
        bds[1] = old_bds[1];
        bds[2] = new_bd;

        /* NB: bds[0] and bds[1] are ordered already */
        cfs_hash_bd_order(&bds[1], &bds[2]);
        cfs_hash_bd_order(&bds[0], &bds[1]);

        cfs_hash_multi_bd_lock(hs, bds, 3, 1);
        if (likely(old_bds[1].bd_bucket == NULL)) {
                cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
        } else {
                cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
                cfs_hash_bd_add_locked(hs, &new_bd, hnode);
        }
        /* overwrite the key inside the locks, otherwise this may race
         * with other operations, e.g. rehash */
        cfs_hash_keycpy(hs, hnode, new_key);

        cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
        cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_rehash_key);
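
/*
 * Sketch (illustrative only): atomically move an object to a new key.
 * Lookups never miss the object while the move is in flight, because
 * both old buckets and the new bucket are locked together.  "mo_node"
 * is a hypothetical embedded hlist_node:
 *
 *      cfs_hash_rehash_key(my_hash, &old_key, &new_key, &mo->mo_node);
 *
 * The stored key is rewritten via cfs_hash_keycpy() while the bucket
 * locks are held, so the object is findable under exactly one of the
 * two keys at any instant.
 */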

void cfs_hash_debug_header(struct seq_file *m)
{
        seq_printf(m, "%-*s   cur   min   max theta t-min t-max flags rehash   count  maxdep maxdepb distribution\n",
                   CFS_HASH_BIGNAME_LEN, "name");
}
EXPORT_SYMBOL(cfs_hash_debug_header);

static struct cfs_hash_bucket **
cfs_hash_full_bkts(struct cfs_hash *hs)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (hs->hs_rehash_buckets == NULL)
                return hs->hs_buckets;

        LASSERT(hs->hs_rehash_bits != 0);
        return hs->hs_rehash_bits > hs->hs_cur_bits ?
               hs->hs_rehash_buckets : hs->hs_buckets;
}

static unsigned int
cfs_hash_full_nbkt(struct cfs_hash *hs)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (hs->hs_rehash_buckets == NULL)
                return CFS_HASH_NBKT(hs);

        LASSERT(hs->hs_rehash_bits != 0);
        return hs->hs_rehash_bits > hs->hs_cur_bits ?
               CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
}

void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
{
        int dist[8] = { 0, };
        int maxdep = -1;
        int maxdepb = -1;
        int total = 0;
        int theta;
        int i;

        cfs_hash_lock(hs, 0);
        theta = __cfs_hash_theta(hs);

        seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d  0x%02x %6d ",
                   CFS_HASH_BIGNAME_LEN, hs->hs_name,
                   1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
                   1 << hs->hs_max_bits,
                   __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
                   __cfs_hash_theta_int(hs->hs_min_theta),
                   __cfs_hash_theta_frac(hs->hs_min_theta),
                   __cfs_hash_theta_int(hs->hs_max_theta),
                   __cfs_hash_theta_frac(hs->hs_max_theta),
                   hs->hs_flags, hs->hs_rehash_count);

        /*
         * The distribution is a summary of the chained hash depth in
         * each of the libcfs hash buckets.  Each bucket's hsb_count is
         * divided by the hash theta value and used to generate a
         * histogram of the hash distribution.  A uniform hash will
         * result in all hash buckets being close to the average, thus
         * only the first few entries in the histogram will be non-zero.
         * If your hash function produces a non-uniform distribution,
         * it will be visible as outlier buckets in the histogram.
         *
         * Uniform hash distribution:           128/128/0/0/0/0/0/0
         * Non-Uniform hash distribution:       128/125/0/0/0/0/2/1
         */
        for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
                struct cfs_hash_bd bd;

                bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
                cfs_hash_bd_lock(hs, &bd, 0);
                if (maxdep < bd.bd_bucket->hsb_depmax) {
                        maxdep  = bd.bd_bucket->hsb_depmax;
                        maxdepb = ffz(~maxdep);
                }
                total += bd.bd_bucket->hsb_count;
                dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
                cfs_hash_bd_unlock(hs, &bd, 0);
        }

        seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
        for (i = 0; i < 8; i++)
                seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/');

        cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_debug_str);
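
/*
 * Wiring sketch (illustrative, not part of this file): the two debug
 * helpers pair naturally in a seq_file show callback, e.g. behind a
 * /proc or debugfs entry that dumps one line per hash table:
 *
 *      static int my_hash_seq_show(struct seq_file *m, void *v)
 *      {
 *              cfs_hash_debug_header(m);
 *              cfs_hash_debug_str(my_hash, m);
 *              return 0;
 *      }
 */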