LU-4423 libcfs: Use swap() in cfs_hash_bd_order()
[fs/lustre-release.git] / libcfs / libcfs / hash.c
index d76e31f..bae84df 100644
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  * - move all stuff to libcfs
  * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
  * - ignore hs_rwlock if without CFS_HASH_REHASH setting
- * - buckets are allocated one by one(intead of contiguous memory),
+ * - buckets are allocated one by one(instead of contiguous memory),
  *   to avoid unnecessary cacheline conflict
  *
  * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
- * - "bucket" is a group of hlist_head now, user can speicify bucket size
+ * - "bucket" is a group of hlist_head now, user can specify bucket size
  *   by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
  *   one lock for reducing memory overhead.
  *
  *   Now we support both locked iteration & lockless iteration of hash
  *   table. Also, user can break the iteration by return 1 in callback.
  */
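
As a concrete illustration of the bucket layout described above: with cur_bits = 7 and bkt_bits = 3 there are 1 << 7 = 128 hash chains in total, grouped into 128 / 8 = 16 individually allocated buckets of 1 << 3 = 8 chains each, and the 8 chains of a bucket share one lock. A minimal sketch of how a key maps to (bucket, chain) follows; cfs_hash_id() and hs_cur_bits appear later in this file, while hs_bkt_bits is assumed here to mirror the bkt_bits argument of cfs_hash_create().

    /* Sketch: split a key's hash index into bucket number and chain offset. */
    static void key_to_bucket(struct cfs_hash *hs, const void *key,
                              unsigned int *bkt, unsigned int *off)
    {
            unsigned int index = cfs_hash_id(hs, key, (1U << hs->hs_cur_bits) - 1);

            *bkt = index >> hs->hs_bkt_bits;                /* which cfs_hash_bucket */
            *off = index & ((1U << hs->hs_bkt_bits) - 1);   /* which hlist_head in it */
    }
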
+#include <linux/seq_file.h>
 
+#include <libcfs/linux/linux-list.h>
 #include <libcfs/libcfs.h>
 
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
 static unsigned int warn_on_depth = 8;
-CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
-                "warning when hash depth is high.");
+module_param(warn_on_depth, uint, 0644);
+MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
 #endif
 
 struct cfs_wi_sched *cfs_sched_rehash;
 
 static inline void
-cfs_hash_nl_lock(cfs_hash_lock_t *lock, int exclusive) {}
+cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
 
 static inline void
-cfs_hash_nl_unlock(cfs_hash_lock_t *lock, int exclusive) {}
+cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
 
 static inline void
-cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
-__acquires(&lock->spin)
+cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
+       __acquires(&lock->spin)
 {
        spin_lock(&lock->spin);
 }
 
 static inline void
-cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
-__releases(&lock->spin)
+cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
+       __releases(&lock->spin)
 {
        spin_unlock(&lock->spin);
 }
 
 static inline void
-cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
-__acquires(&lock->rw)
+cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
+       __acquires(&lock->rw)
 {
        if (!exclusive)
                read_lock(&lock->rw);
@@ -148,8 +146,8 @@ __acquires(&lock->rw)
 }
 
 static inline void
-cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
-__releases(&lock->rw)
+cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
+       __releases(&lock->rw)
 {
        if (!exclusive)
                read_unlock(&lock->rw);
@@ -158,61 +156,55 @@ __releases(&lock->rw)
 }
 
 /** No lock hash */
-static cfs_hash_lock_ops_t cfs_hash_nl_lops =
-{
-        .hs_lock        = cfs_hash_nl_lock,
-        .hs_unlock      = cfs_hash_nl_unlock,
-        .hs_bkt_lock    = cfs_hash_nl_lock,
-        .hs_bkt_unlock  = cfs_hash_nl_unlock,
+static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
+       .hs_lock        = cfs_hash_nl_lock,
+       .hs_unlock      = cfs_hash_nl_unlock,
+       .hs_bkt_lock    = cfs_hash_nl_lock,
+       .hs_bkt_unlock  = cfs_hash_nl_unlock,
 };
 
 /** no bucket lock, one spinlock to protect everything */
-static cfs_hash_lock_ops_t cfs_hash_nbl_lops =
-{
-        .hs_lock        = cfs_hash_spin_lock,
-        .hs_unlock      = cfs_hash_spin_unlock,
-        .hs_bkt_lock    = cfs_hash_nl_lock,
-        .hs_bkt_unlock  = cfs_hash_nl_unlock,
+static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
+       .hs_lock        = cfs_hash_spin_lock,
+       .hs_unlock      = cfs_hash_spin_unlock,
+       .hs_bkt_lock    = cfs_hash_nl_lock,
+       .hs_bkt_unlock  = cfs_hash_nl_unlock,
 };
 
 /** spin bucket lock, rehash is enabled */
-static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops =
-{
-        .hs_lock        = cfs_hash_rw_lock,
-        .hs_unlock      = cfs_hash_rw_unlock,
-        .hs_bkt_lock    = cfs_hash_spin_lock,
-        .hs_bkt_unlock  = cfs_hash_spin_unlock,
+static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
+       .hs_lock        = cfs_hash_rw_lock,
+       .hs_unlock      = cfs_hash_rw_unlock,
+       .hs_bkt_lock    = cfs_hash_spin_lock,
+       .hs_bkt_unlock  = cfs_hash_spin_unlock,
 };
 
 /** rw bucket lock, rehash is enabled */
-static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops =
-{
-        .hs_lock        = cfs_hash_rw_lock,
-        .hs_unlock      = cfs_hash_rw_unlock,
-        .hs_bkt_lock    = cfs_hash_rw_lock,
-        .hs_bkt_unlock  = cfs_hash_rw_unlock,
+static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
+       .hs_lock        = cfs_hash_rw_lock,
+       .hs_unlock      = cfs_hash_rw_unlock,
+       .hs_bkt_lock    = cfs_hash_rw_lock,
+       .hs_bkt_unlock  = cfs_hash_rw_unlock,
 };
 
 /** spin bucket lock, rehash is disabled */
-static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops =
-{
-        .hs_lock        = cfs_hash_nl_lock,
-        .hs_unlock      = cfs_hash_nl_unlock,
-        .hs_bkt_lock    = cfs_hash_spin_lock,
-        .hs_bkt_unlock  = cfs_hash_spin_unlock,
+static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
+       .hs_lock        = cfs_hash_nl_lock,
+       .hs_unlock      = cfs_hash_nl_unlock,
+       .hs_bkt_lock    = cfs_hash_spin_lock,
+       .hs_bkt_unlock  = cfs_hash_spin_unlock,
 };
 
 /** rw bucket lock, rehash is disabled */
-static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops =
-{
-        .hs_lock        = cfs_hash_nl_lock,
-        .hs_unlock      = cfs_hash_nl_unlock,
-        .hs_bkt_lock    = cfs_hash_rw_lock,
-        .hs_bkt_unlock  = cfs_hash_rw_unlock,
+static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
+       .hs_lock        = cfs_hash_nl_lock,
+       .hs_unlock      = cfs_hash_nl_unlock,
+       .hs_bkt_lock    = cfs_hash_rw_lock,
+       .hs_bkt_unlock  = cfs_hash_rw_unlock,
 };
 
 static void
-cfs_hash_lock_setup(cfs_hash_t *hs)
+cfs_hash_lock_setup(struct cfs_hash *hs)
 {
        if (cfs_hash_with_no_lock(hs)) {
                hs->hs_lops = &cfs_hash_nl_lops;
@@ -244,26 +236,27 @@ cfs_hash_lock_setup(cfs_hash_t *hs)
  * Simple hash head without depth tracking
  * new element is always added to head of hlist
  */
-typedef struct {
+struct cfs_hash_head {
        struct hlist_head       hh_head;        /**< entries list */
-} cfs_hash_head_t;
+};
 
 static int
-cfs_hash_hh_hhead_size(cfs_hash_t *hs)
+cfs_hash_hh_hhead_size(struct cfs_hash *hs)
 {
-       return sizeof(cfs_hash_head_t);
+       return sizeof(struct cfs_hash_head);
 }
 
 static struct hlist_head *
-cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-       cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
+       struct cfs_hash_head *head;
 
+       head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hh_head;
 }
 
 static int
-cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
        hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
@@ -271,7 +264,7 @@ cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 }
 
 static int
-cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
        hlist_del_init(hnode);
@@ -282,42 +275,46 @@ cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
  * Simple hash head with depth tracking
  * new element is always added to head of hlist
  */
-typedef struct {
+struct cfs_hash_head_dep {
        struct hlist_head       hd_head;        /**< entries list */
        unsigned int            hd_depth;       /**< list length */
-} cfs_hash_head_dep_t;
+};
 
 static int
-cfs_hash_hd_hhead_size(cfs_hash_t *hs)
+cfs_hash_hd_hhead_size(struct cfs_hash *hs)
 {
-       return sizeof(cfs_hash_head_dep_t);
+       return sizeof(struct cfs_hash_head_dep);
 }
 
 static struct hlist_head *
-cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-       cfs_hash_head_dep_t   *head;
+       struct cfs_hash_head_dep   *head;
 
-       head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
+       head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hd_head;
 }
 
 static int
-cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
-       cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
-                                              cfs_hash_head_dep_t, hd_head);
+       struct cfs_hash_head_dep *hh;
+
+       hh = container_of(cfs_hash_hd_hhead(hs, bd),
+                         struct cfs_hash_head_dep, hd_head);
        hlist_add_head(hnode, &hh->hd_head);
        return ++hh->hd_depth;
 }
 
 static int
-cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
-       cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
-                                              cfs_hash_head_dep_t, hd_head);
+       struct cfs_hash_head_dep *hh;
+
+       hh = container_of(cfs_hash_hd_hhead(hs, bd),
+                         struct cfs_hash_head_dep, hd_head);
        hlist_del_init(hnode);
        return --hh->hd_depth;
 }
@@ -326,35 +323,36 @@ cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
  * double links hash head without depth tracking
  * new element is always added to tail of hlist
  */
-typedef struct {
+struct cfs_hash_dhead {
        struct hlist_head       dh_head;        /**< entries list */
        struct hlist_node       *dh_tail;       /**< the last entry */
-} cfs_hash_dhead_t;
+};
 
 static int
-cfs_hash_dh_hhead_size(cfs_hash_t *hs)
+cfs_hash_dh_hhead_size(struct cfs_hash *hs)
 {
-       return sizeof(cfs_hash_dhead_t);
+       return sizeof(struct cfs_hash_dhead);
 }
 
 static struct hlist_head *
-cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-       cfs_hash_dhead_t *head;
+       struct cfs_hash_dhead *head;
 
-       head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
+       head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dh_head;
 }
 
 static int
-cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
-       cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
-                                           cfs_hash_dhead_t, dh_head);
+       struct cfs_hash_dhead *dh;
 
+       dh = container_of(cfs_hash_dh_hhead(hs, bd),
+                         struct cfs_hash_dhead, dh_head);
        if (dh->dh_tail != NULL) /* not empty */
-               hlist_add_after(dh->dh_tail, hnode);
+               hlist_add_behind(hnode, dh->dh_tail);
        else /* empty list */
                hlist_add_head(hnode, &dh->dh_head);
        dh->dh_tail = hnode;
@@ -362,12 +360,13 @@ cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 }
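
hlist_add_after() was renamed to hlist_add_behind() in newer kernels, and the argument order changed: the node being inserted now comes first and the existing node it goes after comes second, matching the list_add() convention. A minimal sketch of the tail-append idiom used above (hypothetical helper name):

    static void tail_append(struct hlist_head *head, struct hlist_node *tail,
                            struct hlist_node *hnode)
    {
            if (tail != NULL)
                    hlist_add_behind(hnode, tail);  /* insert hnode after tail */
            else
                    hlist_add_head(hnode, head);    /* list was empty */
    }
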
 
 static int
-cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
 {
-       cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
-                                           cfs_hash_dhead_t, dh_head);
+       struct cfs_hash_dhead *dh;
 
+       dh = container_of(cfs_hash_dh_hhead(hs, bd),
+                         struct cfs_hash_dhead, dh_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
@@ -380,36 +379,37 @@ cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
  * double links hash head with depth tracking
  * new element is always added to tail of hlist
  */
-typedef struct {
+struct cfs_hash_dhead_dep {
        struct hlist_head       dd_head;        /**< entries list */
        struct hlist_node       *dd_tail;       /**< the last entry */
        unsigned int            dd_depth;       /**< list length */
-} cfs_hash_dhead_dep_t;
+};
 
 static int
-cfs_hash_dd_hhead_size(cfs_hash_t *hs)
+cfs_hash_dd_hhead_size(struct cfs_hash *hs)
 {
-       return sizeof(cfs_hash_dhead_dep_t);
+       return sizeof(struct cfs_hash_dhead_dep);
 }
 
 static struct hlist_head *
-cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-       cfs_hash_dhead_dep_t *head;
+       struct cfs_hash_dhead_dep *head;
 
-       head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
+       head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dd_head;
 }
 
 static int
-cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
-       cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
-                                               cfs_hash_dhead_dep_t, dd_head);
+       struct cfs_hash_dhead_dep *dh;
 
+       dh = container_of(cfs_hash_dd_hhead(hs, bd),
+                         struct cfs_hash_dhead_dep, dd_head);
        if (dh->dd_tail != NULL) /* not empty */
-               hlist_add_after(dh->dd_tail, hnode);
+               hlist_add_behind(hnode, dh->dd_tail);
        else /* empty list */
                hlist_add_head(hnode, &dh->dd_head);
        dh->dd_tail = hnode;
@@ -417,12 +417,13 @@ cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 }
 
 static int
-cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
 {
-       cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
-                                               cfs_hash_dhead_dep_t, dd_head);
+       struct cfs_hash_dhead_dep *dh;
 
+       dh = container_of(cfs_hash_dd_hhead(hs, bd),
+                         struct cfs_hash_dhead_dep, dd_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
@@ -431,28 +432,28 @@ cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
        return --dh->dd_depth;
 }
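
The tail fixup in both *_hnode_del() variants relies on the hlist layout: hnd->pprev points at the previous node's next field, or at the head's first field when hnd is the first entry, so the previous node can be recovered with container_of(). A standalone sketch of that idiom (hypothetical helper name):

    static struct hlist_node *
    hlist_prev_node(struct hlist_head *head, struct hlist_node *hnd)
    {
            /* NULL means hnd was the first entry, i.e. there is no previous
             * node to become the new tail. */
            return (hnd->pprev == &head->first) ? NULL :
                   container_of(hnd->pprev, struct hlist_node, next);
    }
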
 
-static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
+static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
        .hop_hhead      = cfs_hash_hh_hhead,
        .hop_hhead_size = cfs_hash_hh_hhead_size,
        .hop_hnode_add  = cfs_hash_hh_hnode_add,
        .hop_hnode_del  = cfs_hash_hh_hnode_del,
 };
 
-static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
+static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
        .hop_hhead      = cfs_hash_hd_hhead,
        .hop_hhead_size = cfs_hash_hd_hhead_size,
        .hop_hnode_add  = cfs_hash_hd_hnode_add,
        .hop_hnode_del  = cfs_hash_hd_hnode_del,
 };
 
-static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
+static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
        .hop_hhead      = cfs_hash_dh_hhead,
        .hop_hhead_size = cfs_hash_dh_hhead_size,
        .hop_hnode_add  = cfs_hash_dh_hnode_add,
        .hop_hnode_del  = cfs_hash_dh_hnode_del,
 };
 
-static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
+static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
        .hop_hhead      = cfs_hash_dd_hhead,
        .hop_hhead_size = cfs_hash_dd_hhead_size,
        .hop_hnode_add  = cfs_hash_dd_hnode_add,
@@ -460,7 +461,7 @@ static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
 };
 
 static void
-cfs_hash_hlist_setup(cfs_hash_t *hs)
+cfs_hash_hlist_setup(struct cfs_hash *hs)
 {
         if (cfs_hash_with_add_tail(hs)) {
                 hs->hs_hops = cfs_hash_with_depth(hs) ?
@@ -472,8 +473,8 @@ cfs_hash_hlist_setup(cfs_hash_t *hs)
 }
 
 static void
-cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
-                     unsigned int bits, const void *key, cfs_hash_bd_t *bd)
+cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
+                    unsigned int bits, const void *key, struct cfs_hash_bd *bd)
 {
         unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
 
@@ -484,7 +485,7 @@ cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
 }
 
 void
-cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
+cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
 {
         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
         if (likely(hs->hs_rehash_buckets == NULL)) {
@@ -499,7 +500,7 @@ cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
 EXPORT_SYMBOL(cfs_hash_bd_get);
 
 static inline void
-cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
+cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
 {
         if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
                 return;
@@ -522,7 +523,7 @@ cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
 }
 
 void
-cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        struct hlist_node *hnode)
 {
        int rc;
@@ -542,7 +543,7 @@ cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
 
 void
-cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                       struct hlist_node *hnode)
 {
        hs->hs_hops->hop_hnode_del(hs, bd, hnode);
@@ -563,11 +564,11 @@ cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
 
 void
-cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
-                       cfs_hash_bd_t *bd_new, struct hlist_node *hnode)
+cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
+                       struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
 {
-        cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
-        cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
+       struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
+       struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
         int                rc;
 
         if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
@@ -589,7 +590,6 @@ cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
         if (unlikely(nbkt->hsb_version == 0))
                 nbkt->hsb_version++;
 }
-EXPORT_SYMBOL(cfs_hash_bd_move_locked);
 
 enum {
         /** always set, for sanity (avoid ZERO intent) */
@@ -602,7 +602,7 @@ enum {
         CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
 };
 
-typedef enum cfs_hash_lookup_intent {
+enum cfs_hash_lookup_intent {
         /** return item w/o refcount */
         CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
         /** return item with refcount */
@@ -617,12 +617,12 @@ typedef enum cfs_hash_lookup_intent {
         /** delete if existed */
         CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
                                        CFS_HS_LOOKUP_MASK_DEL)
-} cfs_hash_lookup_intent_t;
+};
 
 static struct hlist_node *
-cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key, struct hlist_node *hnode,
-                         cfs_hash_lookup_intent_t intent)
+                         enum cfs_hash_lookup_intent intent)
 
 {
        struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
@@ -661,7 +661,8 @@ cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 }
 
 struct hlist_node *
-cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
+cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                         const void *key)
 {
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                        CFS_HS_LOOKUP_IT_FIND);
@@ -669,39 +670,19 @@ cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
 EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
 
 struct hlist_node *
-cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
+cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                       const void *key)
 {
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                        CFS_HS_LOOKUP_IT_PEEK);
 }
 EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
 
-struct hlist_node *
-cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                          const void *key, struct hlist_node *hnode,
-                           int noref)
-{
-       return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
-                                       CFS_HS_LOOKUP_IT_ADD |
-                                       (!noref * CFS_HS_LOOKUP_MASK_REF));
-}
-EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
-
-struct hlist_node *
-cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                          const void *key, struct hlist_node *hnode)
-{
-       /* hnode can be NULL, we find the first item with @key */
-       return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
-                                       CFS_HS_LOOKUP_IT_FINDDEL);
-}
-EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
-
 static void
-cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                        unsigned n, int excl)
 {
-        cfs_hash_bucket_t *prev = NULL;
+       struct cfs_hash_bucket *prev = NULL;
         int                i;
 
         /**
@@ -721,10 +702,10 @@ cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
 }
 
 static void
-cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                          unsigned n, int excl)
 {
-        cfs_hash_bucket_t *prev = NULL;
+       struct cfs_hash_bucket *prev = NULL;
         int                i;
 
         cfs_hash_for_each_bd(bds, n, i) {
@@ -736,7 +717,7 @@ cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
 }
 
 static struct hlist_node *
-cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                unsigned n, const void *key)
 {
        struct hlist_node *ehnode;
@@ -752,8 +733,8 @@ cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
 }
 
 static struct hlist_node *
-cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
-                                cfs_hash_bd_t *bds, unsigned n, const void *key,
+cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
+                                unsigned n, const void *key,
                                 struct hlist_node *hnode, int noref)
 {
        struct hlist_node *ehnode;
@@ -773,7 +754,7 @@ cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
         if (i == 1) { /* only one bucket */
                 cfs_hash_bd_add_locked(hs, &bds[0], hnode);
         } else {
-                cfs_hash_bd_t      mybd;
+               struct cfs_hash_bd      mybd;
 
                 cfs_hash_bd_get(hs, key, &mybd);
                 cfs_hash_bd_add_locked(hs, &mybd, hnode);
@@ -783,7 +764,7 @@ cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
 }
 
 static struct hlist_node *
-cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode)
 {
@@ -800,7 +781,7 @@ cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
 }
 
 static void
-cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
+cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
 {
         int     rc;
 
@@ -817,17 +798,14 @@ cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
         if (rc == 0) {
                 bd2->bd_bucket = NULL;
 
-        } else if (rc > 0) { /* swab bd1 and bd2 */
-                cfs_hash_bd_t tmp;
-
-                tmp = *bd2;
-                *bd2 = *bd1;
-                *bd1 = tmp;
+       } else if (rc > 0) {
+               swap(*bd1, *bd2); /* swab bd1 and bd2 */
         }
 }
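
swap() is the generic helper from <linux/kernel.h>: it exchanges two lvalues of the same type through a typeof() temporary, so it works on whole structs and replaces the removed open-coded three-assignment block. A small sketch of equivalent usage (hypothetical function, reusing cfs_hash_bd_compare() from this file):

    #include <linux/kernel.h>

    static void order_pair(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
    {
            /* keep the lower-ordered descriptor in bd1, as cfs_hash_bd_order() does */
            if (cfs_hash_bd_compare(bd1, bd2) > 0)
                    swap(*bd1, *bd2);       /* struct-by-value exchange */
    }
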
 
 void
-cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
+cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
+                    struct cfs_hash_bd *bds)
 {
         /* NB: caller should hold hs_lock.rw if REHASH is set */
         cfs_hash_bd_from_key(hs, hs->hs_buckets,
@@ -844,50 +822,44 @@ cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
 
         cfs_hash_bd_order(&bds[0], &bds[1]);
 }
-EXPORT_SYMBOL(cfs_hash_dual_bd_get);
 
 void
-cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
+cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
 {
         cfs_hash_multi_bd_lock(hs, bds, 2, excl);
 }
-EXPORT_SYMBOL(cfs_hash_dual_bd_lock);
 
 void
-cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
+cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
 {
         cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
 }
-EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
 
 struct hlist_node *
-cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key)
 {
         return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
 }
-EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
 
 struct hlist_node *
-cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode,
                                int noref)
 {
        return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
                                                hnode, noref);
 }
-EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
 
 struct hlist_node *
-cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode)
 {
        return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
 }
-EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
 
 static void
-cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
+cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
                       int bkt_size, int prev_size, int size)
 {
         int     i;
@@ -905,11 +877,11 @@ cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
  * needed, the newly allocated buckets if allocation was needed and
  * successful, and NULL on error.
  */
-static cfs_hash_bucket_t **
-cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
+static struct cfs_hash_bucket **
+cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
                          unsigned int old_size, unsigned int new_size)
 {
-        cfs_hash_bucket_t **new_bkts;
+       struct cfs_hash_bucket **new_bkts;
         int                 i;
 
         LASSERT(old_size == 0 || old_bkts != NULL);
@@ -928,7 +900,7 @@ cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
 
        for (i = old_size; i < new_size; i++) {
                struct hlist_head *hhead;
-               cfs_hash_bd_t     bd;
+               struct cfs_hash_bd     bd;
 
                 LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
                 if (new_bkts[i] == NULL) {
@@ -967,12 +939,12 @@ cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
  * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
  *           - CFS_HASH_SORT enable chained hash sort
  */
-static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
+static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
 
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static int cfs_hash_dep_print(cfs_workitem_t *wi)
+static int cfs_hash_dep_print(struct cfs_workitem *wi)
 {
-       cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
+       struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
        int         dep;
        int         bkt;
        int         off;
@@ -993,13 +965,13 @@ static int cfs_hash_dep_print(cfs_workitem_t *wi)
        return 0;
 }
 
-static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
+static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
 {
        spin_lock_init(&hs->hs_dep_lock);
        cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
 }
 
-static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
+static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
 {
        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
                return;
@@ -1015,18 +987,18 @@ static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
 
 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
 
-static inline void cfs_hash_depth_wi_init(cfs_hash_t *hs) {}
-static inline void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) {}
+static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
+static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
 
 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
 
-cfs_hash_t *
+struct cfs_hash *
 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                 unsigned bkt_bits, unsigned extra_bytes,
                 unsigned min_theta, unsigned max_theta,
-                cfs_hash_ops_t *ops, unsigned flags)
+               struct cfs_hash_ops *ops, unsigned flags)
 {
-        cfs_hash_t *hs;
+       struct cfs_hash *hs;
         int         len;
 
         ENTRY;
@@ -1040,7 +1012,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
         LASSERT(ops->hs_object);
         LASSERT(ops->hs_keycmp);
         LASSERT(ops->hs_get != NULL);
-        LASSERT(ops->hs_put_locked != NULL);
+       LASSERT(ops->hs_put != NULL || ops->hs_put_locked != NULL);
 
         if ((flags & CFS_HASH_REHASH) != 0)
                 flags |= CFS_HASH_COUNTER; /* must have counter */
@@ -1056,7 +1028,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
 
         len = (flags & CFS_HASH_BIGNAME) == 0 ?
               CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
-        LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
+       LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
         if (hs == NULL)
                 RETURN(NULL);
 
@@ -1088,7 +1060,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
         if (hs->hs_buckets != NULL)
                 return hs;
 
-        LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
+       LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
         RETURN(NULL);
 }
 EXPORT_SYMBOL(cfs_hash_create);
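
The relaxed assertion above means a table needs hs_get plus at least one of hs_put or hs_put_locked, rather than always requiring hs_put_locked. A usage sketch follows; the table name, sizes and callbacks are illustrative, the callbacks are assumed to be defined elsewhere with the signatures expected by struct cfs_hash_ops, and CFS_HASH_MIN_THETA/CFS_HASH_MAX_THETA are assumed to be the default load-factor thresholds from libcfs_hash.h.

    static struct cfs_hash_ops my_hash_ops = {
            .hs_hash        = my_hash,      /* key -> hash value */
            .hs_key         = my_key,       /* hnode -> key */
            .hs_keycmp      = my_keycmp,    /* compare key against hnode's key */
            .hs_object      = my_object,    /* hnode -> enclosing object */
            .hs_get         = my_get,       /* take a reference on hnode */
            .hs_put         = my_put,       /* drop a reference, no lock held */
    };

    static struct cfs_hash *my_hash_table;

    static int my_module_init(void)
    {
            my_hash_table = cfs_hash_create("my_table",
                                            5,  /* cur_bits: 32 chains initially */
                                            10, /* max_bits: grow up to 1024 chains */
                                            3,  /* bkt_bits: 8 chains per bucket lock */
                                            0,  /* extra_bytes */
                                            CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
                                            &my_hash_ops,
                                            CFS_HASH_REHASH | CFS_HASH_COUNTER);
            return my_hash_table != NULL ? 0 : -ENOMEM;
    }
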
@@ -1097,11 +1069,11 @@ EXPORT_SYMBOL(cfs_hash_create);
  * Cleanup libcfs hash @hs.
  */
 static void
-cfs_hash_destroy(cfs_hash_t *hs)
+cfs_hash_destroy(struct cfs_hash *hs)
 {
        struct hlist_node     *hnode;
        struct hlist_node     *pos;
-       cfs_hash_bd_t         bd;
+       struct cfs_hash_bd         bd;
        int                   i;
        ENTRY;
 
@@ -1153,12 +1125,12 @@ cfs_hash_destroy(cfs_hash_t *hs)
                              0, CFS_HASH_NBKT(hs));
        i = cfs_hash_with_bigname(hs) ?
            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
-       LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));
+       LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
 
        EXIT;
 }
 
-cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
+struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
 {
        if (atomic_inc_not_zero(&hs->hs_refcount))
                return hs;
@@ -1166,7 +1138,7 @@ cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
 }
 EXPORT_SYMBOL(cfs_hash_getref);
 
-void cfs_hash_putref(cfs_hash_t *hs)
+void cfs_hash_putref(struct cfs_hash *hs)
 {
        if (atomic_dec_and_test(&hs->hs_refcount))
                cfs_hash_destroy(hs);
@@ -1174,7 +1146,7 @@ void cfs_hash_putref(cfs_hash_t *hs)
 EXPORT_SYMBOL(cfs_hash_putref);
 
 static inline int
-cfs_hash_rehash_bits(cfs_hash_t *hs)
+cfs_hash_rehash_bits(struct cfs_hash *hs)
 {
         if (cfs_hash_with_no_lock(hs) ||
             !cfs_hash_with_rehash(hs))
@@ -1211,7 +1183,7 @@ cfs_hash_rehash_bits(cfs_hash_t *hs)
  * - too many elements
  */
 static inline int
-cfs_hash_rehash_inline(cfs_hash_t *hs)
+cfs_hash_rehash_inline(struct cfs_hash *hs)
 {
        return !cfs_hash_with_nblk_change(hs) &&
               atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
@@ -1222,9 +1194,9 @@ cfs_hash_rehash_inline(cfs_hash_t *hs)
  * ops->hs_get function will be called when the item is added.
  */
 void
-cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
 {
-        cfs_hash_bd_t   bd;
+       struct cfs_hash_bd   bd;
         int             bits;
 
        LASSERT(hlist_unhashed(hnode));
@@ -1245,30 +1217,30 @@ cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
 EXPORT_SYMBOL(cfs_hash_add);
 
 static struct hlist_node *
-cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
+cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
                     struct hlist_node *hnode, int noref)
 {
        struct hlist_node *ehnode;
-       cfs_hash_bd_t     bds[2];
+       struct cfs_hash_bd     bds[2];
        int               bits = 0;
 
-       LASSERT(hlist_unhashed(hnode));
+       LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode);
 
-        cfs_hash_lock(hs, 0);
-        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
+       cfs_hash_lock(hs, 0);
+       cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
 
-        cfs_hash_key_validate(hs, key, hnode);
-        ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
-                                                 hnode, noref);
-        cfs_hash_dual_bd_unlock(hs, bds, 1);
+       cfs_hash_key_validate(hs, key, hnode);
+       ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
+                                                hnode, noref);
+       cfs_hash_dual_bd_unlock(hs, bds, 1);
 
-        if (ehnode == hnode) /* new item added */
-                bits = cfs_hash_rehash_bits(hs);
-        cfs_hash_unlock(hs, 0);
-        if (bits > 0)
-                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
+       if (ehnode == hnode) /* new item added */
+               bits = cfs_hash_rehash_bits(hs);
+       cfs_hash_unlock(hs, 0);
+       if (bits > 0)
+               cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
 
-        return ehnode;
+       return ehnode;
 }
 
 /**
@@ -1277,7 +1249,8 @@ cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
  * Returns 0 on success or -EALREADY on key collisions.
  */
 int
-cfs_hash_add_unique(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
+                   struct hlist_node *hnode)
 {
        return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
               -EALREADY : 0;
@@ -1291,7 +1264,7 @@ EXPORT_SYMBOL(cfs_hash_add_unique);
  * Otherwise ops->hs_get is called on the item which was added.
  */
 void *
-cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
+cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
                        struct hlist_node *hnode)
 {
        hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
@@ -1308,11 +1281,11 @@ EXPORT_SYMBOL(cfs_hash_findadd_unique);
  * on the removed object.
  */
 void *
-cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
 {
         void           *obj  = NULL;
         int             bits = 0;
-        cfs_hash_bd_t   bds[2];
+       struct cfs_hash_bd   bds[2];
 
         cfs_hash_lock(hs, 0);
         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
@@ -1348,7 +1321,7 @@ EXPORT_SYMBOL(cfs_hash_del);
  * will be returned and ops->hs_put is called on the removed object.
  */
 void *
-cfs_hash_del_key(cfs_hash_t *hs, const void *key)
+cfs_hash_del_key(struct cfs_hash *hs, const void *key)
 {
         return cfs_hash_del(hs, key, NULL);
 }
@@ -1363,11 +1336,11 @@ EXPORT_SYMBOL(cfs_hash_del_key);
  * in the hash @hs NULL is returned.
  */
 void *
-cfs_hash_lookup(cfs_hash_t *hs, const void *key)
+cfs_hash_lookup(struct cfs_hash *hs, const void *key)
 {
         void                 *obj = NULL;
        struct hlist_node     *hnode;
-        cfs_hash_bd_t         bds[2];
+       struct cfs_hash_bd         bds[2];
 
         cfs_hash_lock(hs, 0);
         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
@@ -1384,7 +1357,7 @@ cfs_hash_lookup(cfs_hash_t *hs, const void *key)
 EXPORT_SYMBOL(cfs_hash_lookup);
 
 static void
-cfs_hash_for_each_enter(cfs_hash_t *hs)
+cfs_hash_for_each_enter(struct cfs_hash *hs)
 {
         LASSERT(!cfs_hash_is_exiting(hs));
 
@@ -1393,7 +1366,7 @@ cfs_hash_for_each_enter(cfs_hash_t *hs)
         /*
          * NB: it's a race on cfs_hash_t::hs_iterating, but doesn't matter
          * because it's just an unreliable signal to rehash-thread,
-         * rehash-thread will try to finsih rehash ASAP when seeing this.
+        * rehash-thread will try to finish rehash ASAP when seeing this.
          */
         hs->hs_iterating = 1;
 
@@ -1401,7 +1374,7 @@ cfs_hash_for_each_enter(cfs_hash_t *hs)
         hs->hs_iterators++;
 
         /* NB: iteration is mostly called by service thread,
-         * we tend to cancel pending rehash-requst, instead of
+        * we tend to cancel pending rehash-request, instead of
          * blocking service thread, we will relaunch rehash request
          * after iteration */
         if (cfs_hash_is_rehashing(hs))
@@ -1410,7 +1383,7 @@ cfs_hash_for_each_enter(cfs_hash_t *hs)
 }
 
 static void
-cfs_hash_for_each_exit(cfs_hash_t *hs)
+cfs_hash_for_each_exit(struct cfs_hash *hs)
 {
        int remained;
        int bits;
@@ -1441,12 +1414,12 @@ cfs_hash_for_each_exit(cfs_hash_t *hs)
  *      cfs_hash_bd_del_locked
  */
 static __u64
-cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
+cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
                        void *data, int remove_safe)
 {
        struct hlist_node       *hnode;
        struct hlist_node       *pos;
-       cfs_hash_bd_t           bd;
+       struct cfs_hash_bd      bd;
        __u64                   count = 0;
        int                     excl  = !!remove_safe;
        int                     loop  = 0;
@@ -1494,16 +1467,16 @@ cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
        RETURN(count);
 }
 
-typedef struct {
+struct cfs_hash_cond_arg {
         cfs_hash_cond_opt_cb_t  func;
         void                   *arg;
-} cfs_hash_cond_arg_t;
+};
 
 static int
-cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode, void *data)
 {
-        cfs_hash_cond_arg_t *cond = data;
+       struct cfs_hash_cond_arg *cond = data;
 
         if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
                 cfs_hash_bd_del_locked(hs, bd, hnode);
@@ -1516,9 +1489,9 @@ cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 * any object being referenced.
  */
 void
-cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
+cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
 {
-        cfs_hash_cond_arg_t arg = {
+       struct cfs_hash_cond_arg arg = {
                 .func   = func,
                 .arg    = data,
         };
@@ -1528,7 +1501,7 @@ cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
 EXPORT_SYMBOL(cfs_hash_cond_del);
 
 void
-cfs_hash_for_each(cfs_hash_t *hs,
+cfs_hash_for_each(struct cfs_hash *hs,
                   cfs_hash_for_each_cb_t func, void *data)
 {
         cfs_hash_for_each_tight(hs, func, data, 0);
@@ -1536,7 +1509,7 @@ cfs_hash_for_each(cfs_hash_t *hs,
 EXPORT_SYMBOL(cfs_hash_for_each);
 
 void
-cfs_hash_for_each_safe(cfs_hash_t *hs,
+cfs_hash_for_each_safe(struct cfs_hash *hs,
                        cfs_hash_for_each_cb_t func, void *data)
 {
         cfs_hash_for_each_tight(hs, func, data, 1);
@@ -1544,7 +1517,7 @@ cfs_hash_for_each_safe(cfs_hash_t *hs,
 EXPORT_SYMBOL(cfs_hash_for_each_safe);
 
 static int
-cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
              struct hlist_node *hnode, void *data)
 {
        *(int *)data = 0;
@@ -1552,7 +1525,7 @@ cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 }
 
 int
-cfs_hash_is_empty(cfs_hash_t *hs)
+cfs_hash_is_empty(struct cfs_hash *hs)
 {
         int empty = 1;
 
@@ -1562,7 +1535,7 @@ cfs_hash_is_empty(cfs_hash_t *hs)
 EXPORT_SYMBOL(cfs_hash_is_empty);
 
 __u64
-cfs_hash_size_get(cfs_hash_t *hs)
+cfs_hash_size_get(struct cfs_hash *hs)
 {
        return cfs_hash_with_counter(hs) ?
               atomic_read(&hs->hs_count) :
@@ -1586,70 +1559,102 @@ EXPORT_SYMBOL(cfs_hash_size_get);
  * two cases, so iteration has to be stopped on change.
  */
 static int
-cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
+cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
+                       void *data, int start)
 {
-       struct hlist_node *hnode;
-       struct hlist_node *tmp;
-        cfs_hash_bd_t     bd;
-        __u32             version;
-        int               count = 0;
-        int               stop_on_change;
-        int               rc;
-        int               i;
-        ENTRY;
+       struct hlist_node       *hnode;
+       struct hlist_node       *next = NULL;
+       struct cfs_hash_bd      bd;
+       __u32                   version;
+       int                     count = 0;
+       int                     stop_on_change;
+       int                     has_put_locked;
+       int                     rc = 0;
+       int                     i, end = -1;
+       ENTRY;
 
-        stop_on_change = cfs_hash_with_rehash_key(hs) ||
-                         !cfs_hash_with_no_itemref(hs) ||
-                         CFS_HOP(hs, put_locked) == NULL;
-        cfs_hash_lock(hs, 0);
-        LASSERT(!cfs_hash_is_rehashing(hs));
+       stop_on_change = cfs_hash_with_rehash_key(hs) ||
+                        !cfs_hash_with_no_itemref(hs);
+       has_put_locked = hs->hs_ops->hs_put_locked != NULL;
+       cfs_hash_lock(hs, 0);
+again:
+       LASSERT(!cfs_hash_is_rehashing(hs));
 
        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;
 
+               if (i < start)
+                       continue;
+               else if (end > 0 && i >= end)
+                       break;
+
                 cfs_hash_bd_lock(hs, &bd, 0);
                 version = cfs_hash_bd_version_get(&bd);
 
                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
-                        for (hnode = hhead->first; hnode != NULL;) {
-                                cfs_hash_bucket_validate(hs, &bd, hnode);
-                                cfs_hash_get(hs, hnode);
+                       hnode = hhead->first;
+                       if (hnode == NULL)
+                               continue;
+                       cfs_hash_get(hs, hnode);
+                       for (; hnode != NULL; hnode = next) {
+                               cfs_hash_bucket_validate(hs, &bd, hnode);
+                               next = hnode->next;
+                               if (next != NULL)
+                                       cfs_hash_get(hs, next);
                                 cfs_hash_bd_unlock(hs, &bd, 0);
                                 cfs_hash_unlock(hs, 0);
 
                                rc = func(hs, &bd, hnode, data);
-                               if (stop_on_change)
+                               if (stop_on_change || !has_put_locked)
                                        cfs_hash_put(hs, hnode);
+
                                cond_resched();
                                count++;
 
                                 cfs_hash_lock(hs, 0);
                                 cfs_hash_bd_lock(hs, &bd, 0);
-                                if (!stop_on_change) {
-                                        tmp = hnode->next;
-                                        cfs_hash_put_locked(hs, hnode);
-                                        hnode = tmp;
-                                } else { /* bucket changed? */
-                                        if (version !=
-                                            cfs_hash_bd_version_get(&bd))
-                                                break;
-                                        /* safe to continue because no change */
-                                        hnode = hnode->next;
-                                }
+                               if (stop_on_change) {
+                                       if (version !=
+                                           cfs_hash_bd_version_get(&bd))
+                                               rc = -EINTR;
+                               } else if (has_put_locked) {
+                                       cfs_hash_put_locked(hs, hnode);
+                               }
                                 if (rc) /* callback wants to break iteration */
                                         break;
                         }
+                       if (next != NULL) {
+                               if (has_put_locked) {
+                                       cfs_hash_put_locked(hs, next);
+                                       next = NULL;
+                               }
+                               break;
+                       } else if (rc != 0) {
+                               break;
+                       }
                 }
                 cfs_hash_bd_unlock(hs, &bd, 0);
+               if (next != NULL && !has_put_locked) {
+                       cfs_hash_put(hs, next);
+                       next = NULL;
+               }
+               if (rc) /* callback wants to break iteration */
+                       break;
         }
-        cfs_hash_unlock(hs, 0);
 
-        return count;
+       if (start > 0 && rc == 0) {
+               end = start;
+               start = 0;
+               goto again;
+       }
+
+       cfs_hash_unlock(hs, 0);
+       return count;
 }
 
 int
-cfs_hash_for_each_nolock(cfs_hash_t *hs,
-                         cfs_hash_for_each_cb_t func, void *data)
+cfs_hash_for_each_nolock(struct cfs_hash *hs,
+                        cfs_hash_for_each_cb_t func, void *data, int start)
 {
         ENTRY;
 
@@ -1658,16 +1663,16 @@ cfs_hash_for_each_nolock(cfs_hash_t *hs,
             !cfs_hash_with_no_itemref(hs))
                 RETURN(-EOPNOTSUPP);
 
-        if (CFS_HOP(hs, get) == NULL ||
-            (CFS_HOP(hs, put) == NULL &&
-             CFS_HOP(hs, put_locked) == NULL))
-                RETURN(-EOPNOTSUPP);
+       if (hs->hs_ops->hs_get == NULL ||
+          (hs->hs_ops->hs_put == NULL &&
+           hs->hs_ops->hs_put_locked == NULL))
+               RETURN(-EOPNOTSUPP);
 
-        cfs_hash_for_each_enter(hs);
-        cfs_hash_for_each_relax(hs, func, data);
-        cfs_hash_for_each_exit(hs);
+       cfs_hash_for_each_enter(hs);
+       cfs_hash_for_each_relax(hs, func, data, start);
+       cfs_hash_for_each_exit(hs);
 
-        RETURN(0);
+       RETURN(0);
 }
 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
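
The new start argument lets the unlocked walk begin at an arbitrary bucket index; cfs_hash_for_each_relax() then wraps around (the "goto again" above) so the buckets before start are still visited once. A usage sketch with a hypothetical callback; a nonzero return from the callback stops the iteration, as the header comment notes.

    static int my_scan_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          struct hlist_node *hnode, void *data)
    {
            unsigned int *seen = data;

            (*seen)++;
            return 0;       /* keep going; return nonzero to break out early */
    }

    static int my_scan(struct cfs_hash *hs)
    {
            unsigned int seen = 0;

            /* resume scanning at bucket 17 instead of always starting at 0 */
            return cfs_hash_for_each_nolock(hs, my_scan_cb, &seen, 17);
    }
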
 
@@ -1683,7 +1688,7 @@ EXPORT_SYMBOL(cfs_hash_for_each_nolock);
  * the required locking is in place to prevent concurrent insertions.
  */
 int
-cfs_hash_for_each_empty(cfs_hash_t *hs,
+cfs_hash_for_each_empty(struct cfs_hash *hs,
                         cfs_hash_for_each_cb_t func, void *data)
 {
         unsigned  i = 0;
@@ -1692,28 +1697,28 @@ cfs_hash_for_each_empty(cfs_hash_t *hs,
         if (cfs_hash_with_no_lock(hs))
                 return -EOPNOTSUPP;
 
-        if (CFS_HOP(hs, get) == NULL ||
-            (CFS_HOP(hs, put) == NULL &&
-             CFS_HOP(hs, put_locked) == NULL))
-                return -EOPNOTSUPP;
+       if (hs->hs_ops->hs_get == NULL ||
+          (hs->hs_ops->hs_put == NULL &&
+           hs->hs_ops->hs_put_locked == NULL))
+               return -EOPNOTSUPP;
 
-        cfs_hash_for_each_enter(hs);
-        while (cfs_hash_for_each_relax(hs, func, data)) {
-                CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
-                       hs->hs_name, i++);
-        }
-        cfs_hash_for_each_exit(hs);
-        RETURN(0);
+       cfs_hash_for_each_enter(hs);
+       while (cfs_hash_for_each_relax(hs, func, data, 0)) {
+               CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
+                      hs->hs_name, i++);
+       }
+       cfs_hash_for_each_exit(hs);
+       RETURN(0);
 }
 EXPORT_SYMBOL(cfs_hash_for_each_empty);
 
 void
-cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
+cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
                        cfs_hash_for_each_cb_t func, void *data)
 {
        struct hlist_head *hhead;
        struct hlist_node *hnode;
-       cfs_hash_bd_t      bd;
+       struct cfs_hash_bd         bd;
 
         cfs_hash_for_each_enter(hs);
         cfs_hash_lock(hs, 0);
@@ -1743,11 +1748,11 @@ EXPORT_SYMBOL(cfs_hash_hlist_for_each);
  * is held so the callback must never sleep.
    */
 void
-cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
+cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
                        cfs_hash_for_each_cb_t func, void *data)
 {
        struct hlist_node *hnode;
-       cfs_hash_bd_t      bds[2];
+       struct cfs_hash_bd         bds[2];
        unsigned           i;
 
        cfs_hash_lock(hs, 0);
@@ -1784,7 +1789,7 @@ EXPORT_SYMBOL(cfs_hash_for_each_key);
  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
  */
 void
-cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
+cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
 {
         int     i;
 
@@ -1810,19 +1815,17 @@ cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
                cfs_hash_lock(hs, 1);
        }
 }
-EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
 
 void
-cfs_hash_rehash_cancel(cfs_hash_t *hs)
+cfs_hash_rehash_cancel(struct cfs_hash *hs)
 {
         cfs_hash_lock(hs, 1);
         cfs_hash_rehash_cancel_locked(hs);
         cfs_hash_unlock(hs, 1);
 }
-EXPORT_SYMBOL(cfs_hash_rehash_cancel);
 
 int
-cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
+cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
 {
         int     rc;
 
@@ -1849,12 +1852,11 @@ cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
 
         return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
 }
-EXPORT_SYMBOL(cfs_hash_rehash);
 
 static int
-cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
+cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
 {
-       cfs_hash_bd_t      new;
+       struct cfs_hash_bd      new;
        struct hlist_head *hhead;
        struct hlist_node *hnode;
        struct hlist_node *pos;
@@ -1882,19 +1884,20 @@ cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
 }
 
 static int
-cfs_hash_rehash_worker(cfs_workitem_t *wi)
-{
-        cfs_hash_t         *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
-        cfs_hash_bucket_t **bkts;
-        cfs_hash_bd_t       bd;
-        unsigned int        old_size;
-        unsigned int        new_size;
-        int                 bsize;
-        int                 count = 0;
-        int                 rc = 0;
-        int                 i;
+cfs_hash_rehash_worker(struct cfs_workitem *wi)
+{
+       struct cfs_hash         *hs =
+               container_of(wi, struct cfs_hash, hs_rehash_wi);
+       struct cfs_hash_bucket **bkts;
+       struct cfs_hash_bd      bd;
+       unsigned int            old_size;
+       unsigned int            new_size;
+       int                     bsize;
+       int                     count = 0;
+       int                     rc = 0;
+       int                     i;
 
-        LASSERT (hs != NULL && cfs_hash_with_rehash(hs));
+       LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
 
         cfs_hash_lock(hs, 0);
         LASSERT(cfs_hash_is_rehashing(hs));
@@ -1976,7 +1979,7 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi)
         if (bkts != NULL)
                 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
         if (rc != 0)
-                CDEBUG(D_INFO, "early quit of of rehashing: %d\n", rc);
+               CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
        /* return 1 only if cfs_wi_exit is called */
        return rc == -ESRCH;
 }
@@ -1991,12 +1994,12 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi)
  * the registered cfs_hash_get() and cfs_hash_put() functions will
  * not be called.
  */
-void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
+void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
                         void *new_key, struct hlist_node *hnode)
 {
-        cfs_hash_bd_t        bds[3];
-        cfs_hash_bd_t        old_bds[2];
-        cfs_hash_bd_t        new_bd;
+       struct cfs_hash_bd        bds[3];
+       struct cfs_hash_bd        old_bds[2];
+       struct cfs_hash_bd        new_bd;
 
        LASSERT(!hlist_unhashed(hnode));
 
@@ -2029,18 +2032,15 @@ void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
 }
 EXPORT_SYMBOL(cfs_hash_rehash_key);
 
-int cfs_hash_debug_header(struct seq_file *m)
+void cfs_hash_debug_header(struct seq_file *m)
 {
-       return seq_printf(m, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
-                       CFS_HASH_BIGNAME_LEN,
-                       "name", "cur", "min", "max", "theta", "t-min", "t-max",
-                       "flags", "rehash", "count", "maxdep", "maxdepb",
-                       " distribution");
+       seq_printf(m, "%-*s   cur   min   max theta t-min t-max flags rehash   count  maxdep maxdepb distribution\n",
+                  CFS_HASH_BIGNAME_LEN, "name");
 }
 EXPORT_SYMBOL(cfs_hash_debug_header);
 
-static cfs_hash_bucket_t **
-cfs_hash_full_bkts(cfs_hash_t *hs)
+static struct cfs_hash_bucket **
+cfs_hash_full_bkts(struct cfs_hash *hs)
 {
         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
         if (hs->hs_rehash_buckets == NULL)
@@ -2052,7 +2052,7 @@ cfs_hash_full_bkts(cfs_hash_t *hs)
 }
 
 static unsigned int
-cfs_hash_full_nbkt(cfs_hash_t *hs)
+cfs_hash_full_nbkt(struct cfs_hash *hs)
 {
         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
         if (hs->hs_rehash_buckets == NULL)
@@ -2063,31 +2063,28 @@ cfs_hash_full_nbkt(cfs_hash_t *hs)
                CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
 }
 
-int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m)
+void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
 {
-       int     dist[8] = { 0, };
-       int     maxdep  = -1;
-       int     maxdepb = -1;
-       int     total   = 0;
-       int     c       = 0;
-       int     theta;
-       int     i;
+       int dist[8] = { 0, };
+       int maxdep = -1;
+       int maxdepb = -1;
+       int total = 0;
+       int theta;
+       int i;
 
        cfs_hash_lock(hs, 0);
        theta = __cfs_hash_theta(hs);
 
-       c += seq_printf(m, "%-*s ", CFS_HASH_BIGNAME_LEN, hs->hs_name);
-       c += seq_printf(m, "%5d ",  1 << hs->hs_cur_bits);
-       c += seq_printf(m, "%5d ",  1 << hs->hs_min_bits);
-       c += seq_printf(m, "%5d ",  1 << hs->hs_max_bits);
-       c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(theta),
-                       __cfs_hash_theta_frac(theta));
-       c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(hs->hs_min_theta),
-                       __cfs_hash_theta_frac(hs->hs_min_theta));
-       c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(hs->hs_max_theta),
-                       __cfs_hash_theta_frac(hs->hs_max_theta));
-       c += seq_printf(m, " 0x%02x ", hs->hs_flags);
-       c += seq_printf(m, "%6d ", hs->hs_rehash_count);
+       seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d  0x%02x %6d ",
+                  CFS_HASH_BIGNAME_LEN, hs->hs_name,
+                  1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
+                  1 << hs->hs_max_bits,
+                  __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
+                  __cfs_hash_theta_int(hs->hs_min_theta),
+                  __cfs_hash_theta_frac(hs->hs_min_theta),
+                  __cfs_hash_theta_int(hs->hs_max_theta),
+                  __cfs_hash_theta_frac(hs->hs_max_theta),
+                  hs->hs_flags, hs->hs_rehash_count);
 
        /*
         * The distribution is a summary of the chained hash depth in
@@ -2103,28 +2100,23 @@ int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m)
         * Non-Uniform hash distribution:       128/125/0/0/0/0/2/1
         */
        for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
-               cfs_hash_bd_t bd;
+               struct cfs_hash_bd bd;
 
                bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
                cfs_hash_bd_lock(hs, &bd, 0);
                if (maxdep < bd.bd_bucket->hsb_depmax) {
                        maxdep  = bd.bd_bucket->hsb_depmax;
-#ifdef __KERNEL__
                        maxdepb = ffz(~maxdep);
-#endif
                }
                total += bd.bd_bucket->hsb_count;
-               dist[min(fls(bd.bd_bucket->hsb_count/max(theta,1)),7)]++;
+               dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
                cfs_hash_bd_unlock(hs, &bd, 0);
        }
 
-       c += seq_printf(m, "%7d ", total);
-       c += seq_printf(m, "%7d ", maxdep);
-       c += seq_printf(m, "%7d ", maxdepb);
+       seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
        for (i = 0; i < 8; i++)
-               c += seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
+               seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
 
        cfs_hash_unlock(hs, 0);
-       return c;
 }
 EXPORT_SYMBOL(cfs_hash_debug_str);
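
With seq_printf() no longer returning a byte count in current kernels, both debug helpers now return void; a caller that needs to detect truncation can check seq_has_overflowed() on the seq_file instead. A minimal sketch of a show routine using them (file and variable names are hypothetical):

    static struct cfs_hash *my_hash;        /* hypothetical table being inspected */

    static int my_hash_stats_show(struct seq_file *m, void *unused)
    {
            cfs_hash_debug_header(m);       /* column headings */
            cfs_hash_debug_str(my_hash, m); /* one line of stats per hash table */
            return 0;
    }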