LU-6245 libcfs: clean up libcfs hash code for upstream 24/14624/10
author James Simmons <uja.ornl@yahoo.com>
Thu, 28 May 2015 14:55:05 +0000 (10:55 -0400)
committer Oleg Drokin <oleg.drokin@intel.com>
Wed, 10 Jun 2015 02:51:29 +0000 (02:51 +0000)
This patch brings the libcfs hash handling up to Linux
kernel coding style. Various typedefs and macros have
been removed. The rename of hlist_add_after() to
hlist_add_behind() in 3.17 kernels is also handled.
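
For illustration, the caller-side effect of the typedef removal is a
switch from the opaque cfs_hash_t/cfs_hash_bd_t names to the plain
struct tags now listed in checkpatch. The sketch below only mirrors
that pattern; example_bucket_count() is a hypothetical helper, not
part of the patch:

	static unsigned int
	example_bucket_count(struct cfs_hash *hs, const void *key)
	{
		struct cfs_hash_bd bd;	/* bucket descriptor, kept on the stack */
		unsigned int count;

		/* was: cfs_hash_t *hs; cfs_hash_bd_t bd; before this patch */
		cfs_hash_bd_get_and_lock(hs, key, &bd, 0);	/* 0 = shared lock */
		count = cfs_hash_bd_count_get(&bd);		/* needs bucket lock held */
		cfs_hash_bd_unlock(hs, &bd, 0);

		return count;
	}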

Change-Id: If50e34cfe4aaa593f755c9214f350ce21da56f20
Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-on: http://review.whamcloud.com/14624
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Bob Glossman <bob.glossman@intel.com>
Reviewed-by: frank zago <fzago@cray.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
44 files changed:
contrib/scripts/checkpatch.pl
libcfs/autoconf/lustre-libcfs.m4
libcfs/include/libcfs/libcfs_hash.h
libcfs/include/libcfs/list.h
libcfs/libcfs/hash.c
lustre/include/lprocfs_status.h
lustre/include/lu_object.h
lustre/include/lustre_dlm.h
lustre/include/lustre_export.h
lustre/include/lustre_nodemap.h
lustre/include/lustre_nrs_crr.h
lustre/include/lustre_nrs_orr.h
lustre/include/lustre_nrs_tbf.h
lustre/include/obd.h
lustre/ldlm/ldlm_flock.c
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_request.c
lustre/ldlm/ldlm_resource.c
lustre/llite/vvp_dev.c
lustre/lod/lod_internal.h
lustre/lod/lod_pool.c
lustre/lov/lov_internal.h
lustre/lov/lov_pool.c
lustre/mdt/mdt_lproc.c
lustre/obdclass/cl_object.c
lustre/obdclass/genops.c
lustre/obdclass/lprocfs_jobstats.c
lustre/obdclass/lprocfs_status_server.c
lustre/obdclass/lu_object.c
lustre/obdclass/obd_config.c
lustre/osc/osc_quota.c
lustre/ptlrpc/connection.c
lustre/ptlrpc/nodemap_handler.c
lustre/ptlrpc/nodemap_lproc.c
lustre/ptlrpc/nodemap_member.c
lustre/ptlrpc/nrs_crr.c
lustre/ptlrpc/nrs_orr.c
lustre/ptlrpc/nrs_tbf.c
lustre/quota/lquota_entry.c
lustre/quota/lquota_internal.h
lustre/quota/qmt_internal.h
lustre/quota/qmt_pool.c
lustre/quota/qsd_reint.c

contrib/scripts/checkpatch.pl
index 4936fbe..a68675b 100755 (executable)
@@ -438,6 +438,20 @@ my %dep_functions = (
 
        'cfs_rcu_head_t',               'struct rcu_head',
 
+       'cfs_hash_lock_t',              'union cfs_hash_lock',
+       'cfs_hash_bucket_t',            'struct cfs_hash_bucket',
+       'cfs_hash_bd_t',                'struct cfs_hash_bd',
+       'cfs_hash_t',                   'struct cfs_hash',
+       'cfs_hash_lock_ops_t',          'struct cfs_hash_lock_ops',
+       'cfs_hash_hlist_ops_t',         'struct cfs_hash_hlist_ops',
+       'cfs_hash_ops_t',               'struct cfs_hash_ops',
+       'cfs_hash_head_t',              'struct cfs_hash_head',
+       'cfs_hash_head_dep_t',          'struct cfs_hash_head_dep',
+       'cfs_hash_dhead_t',             'struct cfs_hash_dhead',
+       'cfs_hash_dhead_dep_t',         'struct cfs_hash_dhead_dep',
+       'cfs_hash_lookup_intent_t',     'enum cfs_hash_lookup_intent',
+       'cfs_hash_cond_arg_t',          'struct cfs_hash_cond_arg',
+
        'LPROCFS',                      'CONFIG_PROC_FS',
        'alloca',                       'malloc',
        'mktemp',                       'mkstemp',
libcfs/autoconf/lustre-libcfs.m4
index 2e80879..da7f06b 100644 (file)
@@ -298,7 +298,23 @@ shrinker_count_objects, [
        AC_DEFINE(HAVE_SHRINKER_COUNT, 1,
                [shrinker has count_objects member])
 ])
+]) # LIBCFS_SHRINKER_COUNT
+
+#
+# Kernel version 3.17 changed hlist_add_after to
+# hlist_add_behind
+#
+AC_DEFUN([LIBCFS_HLIST_ADD_AFTER],[
+LB_CHECK_COMPILE([does function hlist_add_after exist],
+hlist_add_after, [
+       #include <linux/list.h>
+],[
+       hlist_add_after(NULL, NULL);
+],[
+       AC_DEFINE(HAVE_HLIST_ADD_AFTER, 1,
+               [hlist_add_after is available])
 ])
+]) # LIBCFS_HLIST_ADD_AFTER
 
 #
 # LIBCFS_PROG_LINUX
@@ -337,6 +353,8 @@ LIBCFS_ENABLE_CRC32_ACCEL
 LIBCFS_ENABLE_CRC32C_ACCEL
 # 3.12
 LIBCFS_SHRINKER_COUNT
+# 3.17
+LIBCFS_HLIST_ADD_AFTER
 ]) # LIBCFS_PROG_LINUX
 
 #
libcfs/include/libcfs/libcfs_hash.h
index 9b7e7f4..1d07b69 100644 (file)
@@ -41,6 +41,9 @@
 
 #ifndef __LIBCFS_HASH_H__
 #define __LIBCFS_HASH_H__
+
+#include <linux/hash.h>
+
 /*
  * Knuth recommends primes in approximately golden ratio to the maximum
  * integer representable by a machine word for multiplicative hashing.
 /*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
 #define CFS_GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
 
-/*
- * Ideally we would use HAVE_HASH_LONG for this, but on linux we configure
- * the linux kernel and user space at the same time, so we need to differentiate
- * between them explicitely. If this is not needed on other architectures, then
- * we'll need to move the functions to archi specific headers.
- */
-
-#ifdef __KERNEL__
-# include <linux/hash.h>
-#else /* __KERNEL__ */
-/* Fast hashing routine for a long.
-   (C) 2002 William Lee Irwin III, IBM */
-
-# if BITS_PER_LONG == 32
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#  define CFS_GOLDEN_RATIO_PRIME          CFS_GOLDEN_RATIO_PRIME_32
-# elif BITS_PER_LONG == 64
-/*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#  define CFS_GOLDEN_RATIO_PRIME          CFS_GOLDEN_RATIO_PRIME_64
-# else
-#  error Define CFS_GOLDEN_RATIO_PRIME for your wordsize.
-# endif /* BITS_PER_LONG == 64 */
-
-static inline unsigned long hash_long(unsigned long val, unsigned int bits)
-{
-       unsigned long hash = val;
-
-# if BITS_PER_LONG == 64
-       /*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
-       unsigned long n = hash;
-       n <<= 18;
-       hash -= n;
-       n <<= 33;
-       hash -= n;
-       n <<= 3;
-       hash += n;
-       n <<= 3;
-       hash -= n;
-       n <<= 4;
-       hash += n;
-       n <<= 2;
-       hash += n;
-# else /* BITS_PER_LONG == 64 */
-       /* On some cpus multiply is faster, on others gcc will do shifts */
-       hash *= CFS_GOLDEN_RATIO_PRIME;
-# endif /* BITS_PER_LONG != 64 */
-
-       /* High bits are more random, so use them. */
-       return hash >> (BITS_PER_LONG - bits);
-}
-#endif /* !__KERNEL__ */
-
 /** disable debug */
-#define CFS_HASH_DEBUG_NONE         0
+#define CFS_HASH_DEBUG_NONE    0
 /** record hash depth and output to console when it's too deep,
  *  computing overhead is low but consume more memory */
-#define CFS_HASH_DEBUG_1            1
+#define CFS_HASH_DEBUG_1       1
 /** expensive, check key validation */
-#define CFS_HASH_DEBUG_2            2
+#define CFS_HASH_DEBUG_2       2
 
-#define CFS_HASH_DEBUG_LEVEL        CFS_HASH_DEBUG_NONE
+#define CFS_HASH_DEBUG_LEVEL   CFS_HASH_DEBUG_NONE
 
 struct cfs_hash_ops;
 struct cfs_hash_lock_ops;
 struct cfs_hash_hlist_ops;
 
-typedef union {
+union cfs_hash_lock {
        rwlock_t                rw;             /**< rwlock */
        spinlock_t              spin;           /**< spinlock */
-} cfs_hash_lock_t;
+};
 
 /**
  * cfs_hash_bucket is a container of:
- * - lock, couter ...
+ * - lock, counter ...
  * - array of hash-head starting from hsb_head[0], hash-head can be one of
- *   . cfs_hash_head_t
- *   . cfs_hash_head_dep_t
- *   . cfs_hash_dhead_t
- *   . cfs_hash_dhead_dep_t
+ *   . struct cfs_hash_head
+ *   . struct cfs_hash_head_dep
+ *   . struct cfs_hash_dhead
+ *   . struct cfs_hash_dhead_dep
  *   which depends on requirement of user
  * - some extra bytes (caller can require it while creating hash)
  */
-typedef struct cfs_hash_bucket {
-       cfs_hash_lock_t         hsb_lock;       /**< bucket lock */
+struct cfs_hash_bucket {
+       union cfs_hash_lock     hsb_lock;       /**< bucket lock */
        __u32                   hsb_count;      /**< current entries */
        __u32                   hsb_version;    /**< change version */
        unsigned int            hsb_index;      /**< index of bucket */
        int                     hsb_depmax;     /**< max depth on bucket */
        long                    hsb_head[0];    /**< hash-head array */
-} cfs_hash_bucket_t;
+};
 
 /**
  * cfs_hash bucket descriptor, it's normally in stack of caller
  */
-typedef struct cfs_hash_bd {
-        cfs_hash_bucket_t          *bd_bucket;      /**< address of bucket */
-        unsigned int                bd_offset;      /**< offset in bucket */
-} cfs_hash_bd_t;
+struct cfs_hash_bd {
+       /**< address of bucket */
+       struct cfs_hash_bucket  *bd_bucket;
+       /**< offset in bucket */
+       unsigned int             bd_offset;
+};
 
 #define CFS_HASH_NAME_LEN           16      /**< default name length */
 #define CFS_HASH_BIGNAME_LEN        64      /**< bigname for param tree */
@@ -179,7 +132,7 @@ enum cfs_hash_tag {
         CFS_HASH_NO_BKTLOCK     = 1 << 1,
         /** rwlock to protect bucket */
         CFS_HASH_RW_BKTLOCK     = 1 << 2,
-        /** spinlcok to protect bucket */
+       /** spinlock to protect bucket */
         CFS_HASH_SPIN_BKTLOCK   = 1 << 3,
         /** always add new item to tail */
         CFS_HASH_ADD_TAIL       = 1 << 4,
@@ -251,20 +204,20 @@ enum cfs_hash_tag {
  * locations; additions must take care to only insert into the new bucket.
  */
 
-typedef struct cfs_hash {
-        /** serialize with rehash, or serialize all operations if
-         * the hash-table has CFS_HASH_NO_BKTLOCK */
-        cfs_hash_lock_t             hs_lock;
-        /** hash operations */
-        struct cfs_hash_ops        *hs_ops;
-        /** hash lock operations */
-        struct cfs_hash_lock_ops   *hs_lops;
-        /** hash list operations */
-        struct cfs_hash_hlist_ops  *hs_hops;
-        /** hash buckets-table */
-       cfs_hash_bucket_t         **hs_buckets;
+struct cfs_hash {
+       /** serialize with rehash, or serialize all operations if
+        * the hash-table has CFS_HASH_NO_BKTLOCK */
+       union cfs_hash_lock             hs_lock;
+       /** hash operations */
+       struct cfs_hash_ops             *hs_ops;
+       /** hash lock operations */
+       struct cfs_hash_lock_ops        *hs_lops;
+       /** hash list operations */
+       struct cfs_hash_hlist_ops       *hs_hops;
+       /** hash buckets-table */
+       struct cfs_hash_bucket          **hs_buckets;
        /** total number of items on this hash-table */
-       atomic_t                hs_count;
+       atomic_t                        hs_count;
        /** hash flags, see cfs_hash_tag for detail */
        __u16                       hs_flags;
        /** # of extra-bytes for bucket, for user saving extended attributes */
@@ -292,14 +245,14 @@ typedef struct cfs_hash {
         /** # of iterators (caller of cfs_hash_for_each_*) */
         __u32                       hs_iterators;
        /** rehash workitem */
-       cfs_workitem_t              hs_rehash_wi;
+       cfs_workitem_t                  hs_rehash_wi;
        /** refcount on this hash table */
-       atomic_t                    hs_refcount;
+       atomic_t                        hs_refcount;
        /** rehash buckets-table */
-       cfs_hash_bucket_t         **hs_rehash_buckets;
+       struct cfs_hash_bucket          **hs_rehash_buckets;
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
         /** serialize debug members */
-       spinlock_t                      hs_dep_lock;
+       spinlock_t                  hs_dep_lock;
         /** max depth */
         unsigned int                hs_dep_max;
         /** id of the deepest bucket */
@@ -313,35 +266,35 @@ typedef struct cfs_hash {
 #endif
         /** name of htable */
         char                        hs_name[0];
-} cfs_hash_t;
+};
 
-typedef struct cfs_hash_lock_ops {
+struct cfs_hash_lock_ops {
         /** lock the hash table */
-        void    (*hs_lock)(cfs_hash_lock_t *lock, int exclusive);
+       void    (*hs_lock)(union cfs_hash_lock *lock, int exclusive);
         /** unlock the hash table */
-        void    (*hs_unlock)(cfs_hash_lock_t *lock, int exclusive);
+       void    (*hs_unlock)(union cfs_hash_lock *lock, int exclusive);
         /** lock the hash bucket */
-        void    (*hs_bkt_lock)(cfs_hash_lock_t *lock, int exclusive);
+       void    (*hs_bkt_lock)(union cfs_hash_lock *lock, int exclusive);
         /** unlock the hash bucket */
-        void    (*hs_bkt_unlock)(cfs_hash_lock_t *lock, int exclusive);
-} cfs_hash_lock_ops_t;
+       void    (*hs_bkt_unlock)(union cfs_hash_lock *lock, int exclusive);
+};
 
-typedef struct cfs_hash_hlist_ops {
+struct cfs_hash_hlist_ops {
        /** return hlist_head of hash-head of @bd */
-       struct hlist_head *(*hop_hhead)(cfs_hash_t *hs, cfs_hash_bd_t *bd);
+       struct hlist_head *(*hop_hhead)(struct cfs_hash *hs, struct cfs_hash_bd *bd);
        /** return hash-head size */
-       int (*hop_hhead_size)(cfs_hash_t *hs);
+       int (*hop_hhead_size)(struct cfs_hash *hs);
        /** add @hnode to hash-head of @bd */
-       int (*hop_hnode_add)(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+       int (*hop_hnode_add)(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                struct hlist_node *hnode);
        /** remove @hnode from hash-head of @bd */
-       int (*hop_hnode_del)(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+       int (*hop_hnode_del)(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                struct hlist_node *hnode);
-} cfs_hash_hlist_ops_t;
+};
 
-typedef struct cfs_hash_ops {
+struct cfs_hash_ops {
        /** return hashed value from @key */
-       unsigned (*hs_hash)(cfs_hash_t *hs, const void *key, unsigned mask);
+       unsigned (*hs_hash)(struct cfs_hash *hs, const void *key, unsigned mask);
        /** return key address of @hnode */
        void *   (*hs_key)(struct hlist_node *hnode);
        /** copy key from @hnode to @key */
@@ -354,14 +307,14 @@ typedef struct cfs_hash_ops {
        /** return object address of @hnode, i.e: container_of(...hnode) */
        void *   (*hs_object)(struct hlist_node *hnode);
        /** get refcount of item, always called with holding bucket-lock */
-       void     (*hs_get)(cfs_hash_t *hs, struct hlist_node *hnode);
+       void     (*hs_get)(struct cfs_hash *hs, struct hlist_node *hnode);
        /** release refcount of item */
-       void     (*hs_put)(cfs_hash_t *hs, struct hlist_node *hnode);
+       void     (*hs_put)(struct cfs_hash *hs, struct hlist_node *hnode);
        /** release refcount of item, always called with holding bucket-lock */
-       void     (*hs_put_locked)(cfs_hash_t *hs, struct hlist_node *hnode);
+       void     (*hs_put_locked)(struct cfs_hash *hs, struct hlist_node *hnode);
        /** it's called before removing of @hnode */
-       void     (*hs_exit)(cfs_hash_t *hs, struct hlist_node *hnode);
-} cfs_hash_ops_t;
+       void     (*hs_exit)(struct cfs_hash *hs, struct hlist_node *hnode);
+};
 
 /** total number of buckets in @hs */
 #define CFS_HASH_NBKT(hs)       \
@@ -381,41 +334,41 @@ typedef struct cfs_hash_ops {
 #define CFS_HASH_RH_NHLIST(hs)  (1U << (hs)->hs_rehash_bits)
 
 static inline int
-cfs_hash_with_no_lock(cfs_hash_t *hs)
+cfs_hash_with_no_lock(struct cfs_hash *hs)
 {
         /* caller will serialize all operations for this hash-table */
         return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
 }
 
 static inline int
-cfs_hash_with_no_bktlock(cfs_hash_t *hs)
+cfs_hash_with_no_bktlock(struct cfs_hash *hs)
 {
         /* no bucket lock, one single lock to protect the hash-table */
         return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0;
 }
 
 static inline int
-cfs_hash_with_rw_bktlock(cfs_hash_t *hs)
+cfs_hash_with_rw_bktlock(struct cfs_hash *hs)
 {
         /* rwlock to protect hash bucket */
         return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0;
 }
 
 static inline int
-cfs_hash_with_spin_bktlock(cfs_hash_t *hs)
+cfs_hash_with_spin_bktlock(struct cfs_hash *hs)
 {
         /* spinlock to protect hash bucket */
         return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0;
 }
 
 static inline int
-cfs_hash_with_add_tail(cfs_hash_t *hs)
+cfs_hash_with_add_tail(struct cfs_hash *hs)
 {
         return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
 }
 
 static inline int
-cfs_hash_with_no_itemref(cfs_hash_t *hs)
+cfs_hash_with_no_itemref(struct cfs_hash *hs)
 {
         /* hash-table doesn't keep refcount on item,
          * item can't be removed from hash unless it's
@@ -424,169 +377,163 @@ cfs_hash_with_no_itemref(cfs_hash_t *hs)
 }
 
 static inline int
-cfs_hash_with_bigname(cfs_hash_t *hs)
+cfs_hash_with_bigname(struct cfs_hash *hs)
 {
         return (hs->hs_flags & CFS_HASH_BIGNAME) != 0;
 }
 
 static inline int
-cfs_hash_with_counter(cfs_hash_t *hs)
+cfs_hash_with_counter(struct cfs_hash *hs)
 {
         return (hs->hs_flags & CFS_HASH_COUNTER) != 0;
 }
 
 static inline int
-cfs_hash_with_rehash(cfs_hash_t *hs)
+cfs_hash_with_rehash(struct cfs_hash *hs)
 {
         return (hs->hs_flags & CFS_HASH_REHASH) != 0;
 }
 
 static inline int
-cfs_hash_with_rehash_key(cfs_hash_t *hs)
+cfs_hash_with_rehash_key(struct cfs_hash *hs)
 {
         return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0;
 }
 
 static inline int
-cfs_hash_with_shrink(cfs_hash_t *hs)
+cfs_hash_with_shrink(struct cfs_hash *hs)
 {
         return (hs->hs_flags & CFS_HASH_SHRINK) != 0;
 }
 
 static inline int
-cfs_hash_with_assert_empty(cfs_hash_t *hs)
+cfs_hash_with_assert_empty(struct cfs_hash *hs)
 {
         return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0;
 }
 
 static inline int
-cfs_hash_with_depth(cfs_hash_t *hs)
+cfs_hash_with_depth(struct cfs_hash *hs)
 {
         return (hs->hs_flags & CFS_HASH_DEPTH) != 0;
 }
 
 static inline int
-cfs_hash_with_nblk_change(cfs_hash_t *hs)
+cfs_hash_with_nblk_change(struct cfs_hash *hs)
 {
         return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0;
 }
 
 static inline int
-cfs_hash_is_exiting(cfs_hash_t *hs)
+cfs_hash_is_exiting(struct cfs_hash *hs)
 {       /* cfs_hash_destroy is called */
         return hs->hs_exiting;
 }
 
 static inline int
-cfs_hash_is_rehashing(cfs_hash_t *hs)
+cfs_hash_is_rehashing(struct cfs_hash *hs)
 {       /* rehash is launched */
         return hs->hs_rehash_bits != 0;
 }
 
 static inline int
-cfs_hash_is_iterating(cfs_hash_t *hs)
+cfs_hash_is_iterating(struct cfs_hash *hs)
 {       /* someone is calling cfs_hash_for_each_* */
         return hs->hs_iterating || hs->hs_iterators != 0;
 }
 
 static inline int
-cfs_hash_bkt_size(cfs_hash_t *hs)
+cfs_hash_bkt_size(struct cfs_hash *hs)
 {
-        return offsetof(cfs_hash_bucket_t, hsb_head[0]) +
+       return offsetof(struct cfs_hash_bucket, hsb_head[0]) +
                hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) +
                hs->hs_extra_bytes;
 }
 
-#define CFS_HOP(hs, op)           (hs)->hs_ops->hs_ ## op
-
 static inline unsigned
-cfs_hash_id(cfs_hash_t *hs, const void *key, unsigned mask)
+cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask)
 {
-       return CFS_HOP(hs, hash)(hs, key, mask);
+       return hs->hs_ops->hs_hash(hs, key, mask);
 }
 
 static inline void *
-cfs_hash_key(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_key(struct cfs_hash *hs, struct hlist_node *hnode)
 {
-       return CFS_HOP(hs, key)(hnode);
+       return hs->hs_ops->hs_key(hnode);
 }
 
 static inline void
-cfs_hash_keycpy(cfs_hash_t *hs, struct hlist_node *hnode, void *key)
+cfs_hash_keycpy(struct cfs_hash *hs, struct hlist_node *hnode, void *key)
 {
-       if (CFS_HOP(hs, keycpy) != NULL)
-               CFS_HOP(hs, keycpy)(hnode, key);
+       if (hs->hs_ops->hs_keycpy != NULL)
+               hs->hs_ops->hs_keycpy(hnode, key);
 }
 
 /**
  * Returns 1 on a match,
  */
 static inline int
-cfs_hash_keycmp(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_keycmp(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
 {
-       return CFS_HOP(hs, keycmp)(key, hnode);
+       return hs->hs_ops->hs_keycmp(key, hnode);
 }
 
 static inline void *
-cfs_hash_object(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_object(struct cfs_hash *hs, struct hlist_node *hnode)
 {
-       return CFS_HOP(hs, object)(hnode);
+       return hs->hs_ops->hs_object(hnode);
 }
 
 static inline void
-cfs_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
-       return CFS_HOP(hs, get)(hs, hnode);
+       return hs->hs_ops->hs_get(hs, hnode);
 }
 
 static inline void
-cfs_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
-       LASSERT(CFS_HOP(hs, put_locked) != NULL);
-
-       return CFS_HOP(hs, put_locked)(hs, hnode);
+       return hs->hs_ops->hs_put_locked(hs, hnode);
 }
 
 static inline void
-cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_put(struct cfs_hash *hs, struct hlist_node *hnode)
 {
-       LASSERT(CFS_HOP(hs, put) != NULL);
-
-       return CFS_HOP(hs, put)(hs, hnode);
+       return hs->hs_ops->hs_put(hs, hnode);
 }
 
 static inline void
-cfs_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
-       if (CFS_HOP(hs, exit))
-               CFS_HOP(hs, exit)(hs, hnode);
+       if (hs->hs_ops->hs_exit)
+               hs->hs_ops->hs_exit(hs, hnode);
 }
 
-static inline void cfs_hash_lock(cfs_hash_t *hs, int excl)
+static inline void cfs_hash_lock(struct cfs_hash *hs, int excl)
 {
         hs->hs_lops->hs_lock(&hs->hs_lock, excl);
 }
 
-static inline void cfs_hash_unlock(cfs_hash_t *hs, int excl)
+static inline void cfs_hash_unlock(struct cfs_hash *hs, int excl)
 {
         hs->hs_lops->hs_unlock(&hs->hs_lock, excl);
 }
 
-static inline int cfs_hash_dec_and_lock(cfs_hash_t *hs,
+static inline int cfs_hash_dec_and_lock(struct cfs_hash *hs,
                                        atomic_t *condition)
 {
        LASSERT(cfs_hash_with_no_bktlock(hs));
        return atomic_dec_and_lock(condition, &hs->hs_lock.spin);
 }
 
-static inline void cfs_hash_bd_lock(cfs_hash_t *hs,
-                                    cfs_hash_bd_t *bd, int excl)
+static inline void cfs_hash_bd_lock(struct cfs_hash *hs,
+                                   struct cfs_hash_bd *bd, int excl)
 {
         hs->hs_lops->hs_bkt_lock(&bd->bd_bucket->hsb_lock, excl);
 }
 
-static inline void cfs_hash_bd_unlock(cfs_hash_t *hs,
-                                      cfs_hash_bd_t *bd, int excl)
+static inline void cfs_hash_bd_unlock(struct cfs_hash *hs,
+                                     struct cfs_hash_bd *bd, int excl)
 {
         hs->hs_lops->hs_bkt_unlock(&bd->bd_bucket->hsb_lock, excl);
 }
@@ -595,56 +542,60 @@ static inline void cfs_hash_bd_unlock(cfs_hash_t *hs,
  * operations on cfs_hash bucket (bd: bucket descriptor),
  * they are normally for hash-table without rehash
  */
-void cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd);
+void cfs_hash_bd_get(struct cfs_hash *hs, const void *key,
+                    struct cfs_hash_bd *bd);
 
-static inline void cfs_hash_bd_get_and_lock(cfs_hash_t *hs, const void *key,
-                                            cfs_hash_bd_t *bd, int excl)
+static inline void
+cfs_hash_bd_get_and_lock(struct cfs_hash *hs, const void *key,
+                        struct cfs_hash_bd *bd, int excl)
 {
         cfs_hash_bd_get(hs, key, bd);
         cfs_hash_bd_lock(hs, bd, excl);
 }
 
-static inline unsigned cfs_hash_bd_index_get(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+static inline unsigned
+cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
         return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits);
 }
 
-static inline void cfs_hash_bd_index_set(cfs_hash_t *hs,
-                                         unsigned index, cfs_hash_bd_t *bd)
+static inline void
+cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned index,
+                     struct cfs_hash_bd *bd)
 {
         bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
         bd->bd_offset = index & (CFS_HASH_BKT_NHLIST(hs) - 1U);
 }
 
 static inline void *
-cfs_hash_bd_extra_get(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
         return (void *)bd->bd_bucket +
                cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
 }
 
 static inline __u32
-cfs_hash_bd_version_get(cfs_hash_bd_t *bd)
+cfs_hash_bd_version_get(struct cfs_hash_bd *bd)
 {
         /* need hold cfs_hash_bd_lock */
         return bd->bd_bucket->hsb_version;
 }
 
 static inline __u32
-cfs_hash_bd_count_get(cfs_hash_bd_t *bd)
+cfs_hash_bd_count_get(struct cfs_hash_bd *bd)
 {
         /* need hold cfs_hash_bd_lock */
         return bd->bd_bucket->hsb_count;
 }
 
 static inline int
-cfs_hash_bd_depmax_get(cfs_hash_bd_t *bd)
+cfs_hash_bd_depmax_get(struct cfs_hash_bd *bd)
 {
         return bd->bd_bucket->hsb_depmax;
 }
 
 static inline int
-cfs_hash_bd_compare(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
+cfs_hash_bd_compare(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
 {
         if (bd1->bd_bucket->hsb_index != bd2->bd_bucket->hsb_index)
                 return bd1->bd_bucket->hsb_index - bd2->bd_bucket->hsb_index;
@@ -655,121 +606,141 @@ cfs_hash_bd_compare(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
         return 0;
 }
 
-void cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+void cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                            struct hlist_node *hnode);
-void cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+void cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                            struct hlist_node *hnode);
-void cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
-                            cfs_hash_bd_t *bd_new, struct hlist_node *hnode);
+void cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
+                            struct cfs_hash_bd *bd_new,
+                            struct hlist_node *hnode);
 
-static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                          atomic_t *condition)
+static inline int
+cfs_hash_bd_dec_and_lock(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                        atomic_t *condition)
 {
        LASSERT(cfs_hash_with_spin_bktlock(hs));
        return atomic_dec_and_lock(condition, &bd->bd_bucket->hsb_lock.spin);
 }
 
-static inline struct hlist_head *cfs_hash_bd_hhead(cfs_hash_t *hs,
-                                                  cfs_hash_bd_t *bd)
+static inline struct hlist_head *
+cfs_hash_bd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
        return hs->hs_hops->hop_hhead(hs, bd);
 }
 
-struct hlist_node *cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                               const void *key);
-struct hlist_node *cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                               const void *key);
-struct hlist_node *cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                               const void *key,
-                                               struct hlist_node *hnode,
-                                               int insist_add);
-struct hlist_node *cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                               const void *key,
-                                               struct hlist_node *hnode);
+struct hlist_node *
+cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                         const void *key);
+struct hlist_node *
+cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                       const void *key);
+struct hlist_node *
+cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                          const void *key, struct hlist_node *hnode,
+                          int insist_add);
+struct hlist_node *
+cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                          const void *key, struct hlist_node *hnode);
 
 /**
  * operations on cfs_hash bucket (bd: bucket descriptor),
  * they are safe for hash-table with rehash
  */
-void cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds);
-void cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl);
-void cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl);
+void cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
+                         struct cfs_hash_bd *bds);
+void cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
+                          int excl);
+void cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
+                            int excl);
 
-static inline void cfs_hash_dual_bd_get_and_lock(cfs_hash_t *hs, const void *key,
-                                               cfs_hash_bd_t *bds, int excl)
+static inline void
+cfs_hash_dual_bd_get_and_lock(struct cfs_hash *hs, const void *key,
+                             struct cfs_hash_bd *bds, int excl)
 {
        cfs_hash_dual_bd_get(hs, key, bds);
        cfs_hash_dual_bd_lock(hs, bds, excl);
 }
 
 struct hlist_node *
-cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key);
 struct hlist_node *
-cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode,
                                int insist_add);
 struct hlist_node *
-cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode);
 
 /* Hash init/cleanup functions */
-cfs_hash_t *cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
-                               unsigned bkt_bits, unsigned extra_bytes,
-                               unsigned min_theta, unsigned max_theta,
-                               cfs_hash_ops_t *ops, unsigned flags);
+struct cfs_hash *
+cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
+               unsigned bkt_bits, unsigned extra_bytes,
+               unsigned min_theta, unsigned max_theta,
+               struct cfs_hash_ops *ops, unsigned flags);
 
-cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs);
-void cfs_hash_putref(cfs_hash_t *hs);
+struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs);
+void cfs_hash_putref(struct cfs_hash *hs);
 
 /* Hash addition functions */
-void cfs_hash_add(cfs_hash_t *hs, const void *key,
+void cfs_hash_add(struct cfs_hash *hs, const void *key,
                        struct hlist_node *hnode);
-int cfs_hash_add_unique(cfs_hash_t *hs, const void *key,
+int cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
                        struct hlist_node *hnode);
-void *cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
-                               struct hlist_node *hnode);
+void *cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
+                             struct hlist_node *hnode);
 
 /* Hash deletion functions */
-void *cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode);
-void *cfs_hash_del_key(cfs_hash_t *hs, const void *key);
+void *cfs_hash_del(struct cfs_hash *hs, const void *key,
+                  struct hlist_node *hnode);
+void *cfs_hash_del_key(struct cfs_hash *hs, const void *key);
 
 /* Hash lookup/for_each functions */
 #define CFS_HASH_LOOP_HOG       1024
 
-typedef int (*cfs_hash_for_each_cb_t)(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                       struct hlist_node *node, void *data);
-void *cfs_hash_lookup(cfs_hash_t *hs, const void *key);
-void cfs_hash_for_each(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
-void cfs_hash_for_each_safe(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
-int  cfs_hash_for_each_nolock(cfs_hash_t *hs, cfs_hash_for_each_cb_t,
-                               void *data);
-int  cfs_hash_for_each_empty(cfs_hash_t *hs, cfs_hash_for_each_cb_t,
-                               void *data);
-void cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
-                               cfs_hash_for_each_cb_t, void *data);
+typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs,
+                                     struct cfs_hash_bd *bd,
+                                     struct hlist_node *node,
+                                     void *data);
+void *
+cfs_hash_lookup(struct cfs_hash *hs, const void *key);
+void
+cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
+void
+cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
+int
+cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
+                        void *data);
+int
+cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
+                       void *data);
+void
+cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
+                     cfs_hash_for_each_cb_t, void *data);
 typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
-void cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t, void *data);
+void
+cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data);
 
-void cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
-                               cfs_hash_for_each_cb_t, void *data);
-int  cfs_hash_is_empty(cfs_hash_t *hs);
-__u64 cfs_hash_size_get(cfs_hash_t *hs);
+void
+cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
+                       cfs_hash_for_each_cb_t, void *data);
+int  cfs_hash_is_empty(struct cfs_hash *hs);
+__u64 cfs_hash_size_get(struct cfs_hash *hs);
 
 /*
  * Rehash - Theta is calculated to be the average chained
- * hash depth assuming a perfectly uniform hash funcion.
+ * hash depth assuming a perfectly uniform hash function.
  */
-void cfs_hash_rehash_cancel_locked(cfs_hash_t *hs);
-void cfs_hash_rehash_cancel(cfs_hash_t *hs);
-int  cfs_hash_rehash(cfs_hash_t *hs, int do_rehash);
-void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
+void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs);
+void cfs_hash_rehash_cancel(struct cfs_hash *hs);
+int  cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
+void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
                        void *new_key, struct hlist_node *hnode);
 
 #if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1
 /* Validate hnode references the correct key */
 static inline void
-cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
+cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
                      struct hlist_node *hnode)
 {
        LASSERT(cfs_hash_keycmp(hs, key, hnode));
@@ -777,10 +748,10 @@ cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
 
 /* Validate hnode is in the correct bucket */
 static inline void
-cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        struct hlist_node *hnode)
 {
-       cfs_hash_bd_t bds[2];
+       struct cfs_hash_bd bds[2];
 
        cfs_hash_dual_bd_get(hs, cfs_hash_key(hs, hnode), bds);
        LASSERT(bds[0].bd_bucket == bd->bd_bucket ||
@@ -790,11 +761,11 @@ cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 #else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */
 
 static inline void
-cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
+cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
                        struct hlist_node *hnode) {}
 
 static inline void
-cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        struct hlist_node *hnode) {}
 
 #endif /* CFS_HASH_DEBUG_LEVEL */
@@ -816,13 +787,14 @@ static inline int __cfs_hash_theta_frac(int theta)
                (__cfs_hash_theta_int(theta) * 1000);
 }
 
-static inline int __cfs_hash_theta(cfs_hash_t *hs)
+static inline int __cfs_hash_theta(struct cfs_hash *hs)
 {
        return (atomic_read(&hs->hs_count) <<
                CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
 }
 
-static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
+static inline void
+__cfs_hash_set_theta(struct cfs_hash *hs, int min, int max)
 {
         LASSERT(min < max);
         hs->hs_min_theta = (__u16)min;
@@ -832,7 +804,7 @@ static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
 /* Generic debug formatting routines mainly for proc handler */
 struct seq_file;
 int cfs_hash_debug_header(struct seq_file *m);
-int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m);
+int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);
 
 /*
  * Generic djb2 hash algorithm for character arrays.
@@ -868,7 +840,7 @@ cfs_hash_u64_hash(const __u64 key, unsigned mask)
         return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
 }
 
-/** iterate over all buckets in @bds (array of cfs_hash_bd_t) */
+/** iterate over all buckets in @bds (array of struct cfs_hash_bd) */
 #define cfs_hash_for_each_bd(bds, n, i) \
         for (i = 0; i < n && (bds)[i].bd_bucket != NULL; i++)
 
libcfs/include/libcfs/list.h
index f73ce98..353b656 100644 (file)
@@ -509,4 +509,8 @@ static inline void hlist_add_after(struct hlist_node *n,
        hlist_for_each_entry_from(tpos, pos, member)
 #endif
 
+#ifdef HAVE_HLIST_ADD_AFTER
+#define hlist_add_behind(hnode, tail)  hlist_add_after(tail, hnode)
+#endif /* HAVE_HLIST_ADD_AFTER */
+
 #endif /* __LIBCFS_LUSTRE_LIST_H__ */
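
With the compat mapping above, callers can use the newer
hlist_add_behind(new, prev) spelling unconditionally: on kernels that
still export hlist_add_after(prev, new) the macro simply swaps the
arguments back, which is what the hash.c changes below rely on. A
minimal sketch of the calling convention; append_node() is
hypothetical:

	/* Insert @hnode immediately after @tail.  On pre-3.17 kernels the
	 * macro above expands to hlist_add_after(tail, hnode), which has
	 * the same effect with the older argument order. */
	static void append_node(struct hlist_node *tail, struct hlist_node *hnode)
	{
		hlist_add_behind(hnode, tail);
	}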
libcfs/libcfs/hash.c
index 7684fd6..43dc673 100644 (file)
  * - move all stuff to libcfs
  * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
  * - ignore hs_rwlock if without CFS_HASH_REHASH setting
- * - buckets are allocated one by one(intead of contiguous memory),
+ * - buckets are allocated one by one(instead of contiguous memory),
  *   to avoid unnecessary cacheline conflict
  *
  * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
- * - "bucket" is a group of hlist_head now, user can speicify bucket size
+ * - "bucket" is a group of hlist_head now, user can specify bucket size
  *   by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
  *   one lock for reducing memory overhead.
  *
 
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
 static unsigned int warn_on_depth = 8;
-CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
-                "warning when hash depth is high.");
+module_param(warn_on_depth, uint, 0644);
+MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
 #endif
 
 struct cfs_wi_sched *cfs_sched_rehash;
 
 static inline void
-cfs_hash_nl_lock(cfs_hash_lock_t *lock, int exclusive) {}
+cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
 
 static inline void
-cfs_hash_nl_unlock(cfs_hash_lock_t *lock, int exclusive) {}
+cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
 
 static inline void
-cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
-__acquires(&lock->spin)
+cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
+       __acquires(&lock->spin)
 {
        spin_lock(&lock->spin);
 }
 
 static inline void
-cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
-__releases(&lock->spin)
+cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
+       __releases(&lock->spin)
 {
        spin_unlock(&lock->spin);
 }
 
 static inline void
-cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
-__acquires(&lock->rw)
+cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
+       __acquires(&lock->rw)
 {
        if (!exclusive)
                read_lock(&lock->rw);
@@ -149,8 +149,8 @@ __acquires(&lock->rw)
 }
 
 static inline void
-cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
-__releases(&lock->rw)
+cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
+       __releases(&lock->rw)
 {
        if (!exclusive)
                read_unlock(&lock->rw);
@@ -159,61 +159,55 @@ __releases(&lock->rw)
 }
 
 /** No lock hash */
-static cfs_hash_lock_ops_t cfs_hash_nl_lops =
-{
-        .hs_lock        = cfs_hash_nl_lock,
-        .hs_unlock      = cfs_hash_nl_unlock,
-        .hs_bkt_lock    = cfs_hash_nl_lock,
-        .hs_bkt_unlock  = cfs_hash_nl_unlock,
+static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
+       .hs_lock        = cfs_hash_nl_lock,
+       .hs_unlock      = cfs_hash_nl_unlock,
+       .hs_bkt_lock    = cfs_hash_nl_lock,
+       .hs_bkt_unlock  = cfs_hash_nl_unlock,
 };
 
 /** no bucket lock, one spinlock to protect everything */
-static cfs_hash_lock_ops_t cfs_hash_nbl_lops =
-{
-        .hs_lock        = cfs_hash_spin_lock,
-        .hs_unlock      = cfs_hash_spin_unlock,
-        .hs_bkt_lock    = cfs_hash_nl_lock,
-        .hs_bkt_unlock  = cfs_hash_nl_unlock,
+static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
+       .hs_lock        = cfs_hash_spin_lock,
+       .hs_unlock      = cfs_hash_spin_unlock,
+       .hs_bkt_lock    = cfs_hash_nl_lock,
+       .hs_bkt_unlock  = cfs_hash_nl_unlock,
 };
 
 /** spin bucket lock, rehash is enabled */
-static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops =
-{
-        .hs_lock        = cfs_hash_rw_lock,
-        .hs_unlock      = cfs_hash_rw_unlock,
-        .hs_bkt_lock    = cfs_hash_spin_lock,
-        .hs_bkt_unlock  = cfs_hash_spin_unlock,
+static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
+       .hs_lock        = cfs_hash_rw_lock,
+       .hs_unlock      = cfs_hash_rw_unlock,
+       .hs_bkt_lock    = cfs_hash_spin_lock,
+       .hs_bkt_unlock  = cfs_hash_spin_unlock,
 };
 
 /** rw bucket lock, rehash is enabled */
-static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops =
-{
-        .hs_lock        = cfs_hash_rw_lock,
-        .hs_unlock      = cfs_hash_rw_unlock,
-        .hs_bkt_lock    = cfs_hash_rw_lock,
-        .hs_bkt_unlock  = cfs_hash_rw_unlock,
+static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
+       .hs_lock        = cfs_hash_rw_lock,
+       .hs_unlock      = cfs_hash_rw_unlock,
+       .hs_bkt_lock    = cfs_hash_rw_lock,
+       .hs_bkt_unlock  = cfs_hash_rw_unlock,
 };
 
 /** spin bucket lock, rehash is disabled */
-static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops =
-{
-        .hs_lock        = cfs_hash_nl_lock,
-        .hs_unlock      = cfs_hash_nl_unlock,
-        .hs_bkt_lock    = cfs_hash_spin_lock,
-        .hs_bkt_unlock  = cfs_hash_spin_unlock,
+static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
+       .hs_lock        = cfs_hash_nl_lock,
+       .hs_unlock      = cfs_hash_nl_unlock,
+       .hs_bkt_lock    = cfs_hash_spin_lock,
+       .hs_bkt_unlock  = cfs_hash_spin_unlock,
 };
 
 /** rw bucket lock, rehash is disabled */
-static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops =
-{
-        .hs_lock        = cfs_hash_nl_lock,
-        .hs_unlock      = cfs_hash_nl_unlock,
-        .hs_bkt_lock    = cfs_hash_rw_lock,
-        .hs_bkt_unlock  = cfs_hash_rw_unlock,
+static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
+       .hs_lock        = cfs_hash_nl_lock,
+       .hs_unlock      = cfs_hash_nl_unlock,
+       .hs_bkt_lock    = cfs_hash_rw_lock,
+       .hs_bkt_unlock  = cfs_hash_rw_unlock,
 };
 
 static void
-cfs_hash_lock_setup(cfs_hash_t *hs)
+cfs_hash_lock_setup(struct cfs_hash *hs)
 {
        if (cfs_hash_with_no_lock(hs)) {
                hs->hs_lops = &cfs_hash_nl_lops;
@@ -245,26 +239,27 @@ cfs_hash_lock_setup(cfs_hash_t *hs)
  * Simple hash head without depth tracking
  * new element is always added to head of hlist
  */
-typedef struct {
+struct cfs_hash_head {
        struct hlist_head       hh_head;        /**< entries list */
-} cfs_hash_head_t;
+};
 
 static int
-cfs_hash_hh_hhead_size(cfs_hash_t *hs)
+cfs_hash_hh_hhead_size(struct cfs_hash *hs)
 {
-       return sizeof(cfs_hash_head_t);
+       return sizeof(struct cfs_hash_head);
 }
 
 static struct hlist_head *
-cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-       cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
+       struct cfs_hash_head *head;
 
+       head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hh_head;
 }
 
 static int
-cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
        hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
@@ -272,7 +267,7 @@ cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 }
 
 static int
-cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
        hlist_del_init(hnode);
@@ -283,42 +278,46 @@ cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
  * Simple hash head with depth tracking
  * new element is always added to head of hlist
  */
-typedef struct {
+struct cfs_hash_head_dep {
        struct hlist_head       hd_head;        /**< entries list */
        unsigned int            hd_depth;       /**< list length */
-} cfs_hash_head_dep_t;
+};
 
 static int
-cfs_hash_hd_hhead_size(cfs_hash_t *hs)
+cfs_hash_hd_hhead_size(struct cfs_hash *hs)
 {
-       return sizeof(cfs_hash_head_dep_t);
+       return sizeof(struct cfs_hash_head_dep);
 }
 
 static struct hlist_head *
-cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-       cfs_hash_head_dep_t   *head;
+       struct cfs_hash_head_dep   *head;
 
-       head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
+       head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hd_head;
 }
 
 static int
-cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
-       cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
-                                              cfs_hash_head_dep_t, hd_head);
+       struct cfs_hash_head_dep *hh;
+
+       hh = container_of(cfs_hash_hd_hhead(hs, bd),
+                         struct cfs_hash_head_dep, hd_head);
        hlist_add_head(hnode, &hh->hd_head);
        return ++hh->hd_depth;
 }
 
 static int
-cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
-       cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
-                                              cfs_hash_head_dep_t, hd_head);
+       struct cfs_hash_head_dep *hh;
+
+       hh = container_of(cfs_hash_hd_hhead(hs, bd),
+                         struct cfs_hash_head_dep, hd_head);
        hlist_del_init(hnode);
        return --hh->hd_depth;
 }
@@ -327,35 +326,36 @@ cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
  * double links hash head without depth tracking
  * new element is always added to tail of hlist
  */
-typedef struct {
+struct cfs_hash_dhead {
        struct hlist_head       dh_head;        /**< entries list */
        struct hlist_node       *dh_tail;       /**< the last entry */
-} cfs_hash_dhead_t;
+};
 
 static int
-cfs_hash_dh_hhead_size(cfs_hash_t *hs)
+cfs_hash_dh_hhead_size(struct cfs_hash *hs)
 {
-       return sizeof(cfs_hash_dhead_t);
+       return sizeof(struct cfs_hash_dhead);
 }
 
 static struct hlist_head *
-cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-       cfs_hash_dhead_t *head;
+       struct cfs_hash_dhead *head;
 
-       head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
+       head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dh_head;
 }
 
 static int
-cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
-       cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
-                                           cfs_hash_dhead_t, dh_head);
+       struct cfs_hash_dhead *dh;
 
+       dh = container_of(cfs_hash_dh_hhead(hs, bd),
+                         struct cfs_hash_dhead, dh_head);
        if (dh->dh_tail != NULL) /* not empty */
-               hlist_add_after(dh->dh_tail, hnode);
+               hlist_add_behind(hnode, dh->dh_tail);
        else /* empty list */
                hlist_add_head(hnode, &dh->dh_head);
        dh->dh_tail = hnode;
@@ -363,12 +363,13 @@ cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 }
 
 static int
-cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
 {
-       cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
-                                           cfs_hash_dhead_t, dh_head);
+       struct cfs_hash_dhead *dh;
 
+       dh = container_of(cfs_hash_dh_hhead(hs, bd),
+                         struct cfs_hash_dhead, dh_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
@@ -381,36 +382,37 @@ cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
  * double links hash head with depth tracking
  * new element is always added to tail of hlist
  */
-typedef struct {
+struct cfs_hash_dhead_dep {
        struct hlist_head       dd_head;        /**< entries list */
        struct hlist_node       *dd_tail;       /**< the last entry */
        unsigned int            dd_depth;       /**< list length */
-} cfs_hash_dhead_dep_t;
+};
 
 static int
-cfs_hash_dd_hhead_size(cfs_hash_t *hs)
+cfs_hash_dd_hhead_size(struct cfs_hash *hs)
 {
-       return sizeof(cfs_hash_dhead_dep_t);
+       return sizeof(struct cfs_hash_dhead_dep);
 }
 
 static struct hlist_head *
-cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-       cfs_hash_dhead_dep_t *head;
+       struct cfs_hash_dhead_dep *head;
 
-       head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
+       head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dd_head;
 }
 
 static int
-cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
-       cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
-                                               cfs_hash_dhead_dep_t, dd_head);
+       struct cfs_hash_dhead_dep *dh;
 
+       dh = container_of(cfs_hash_dd_hhead(hs, bd),
+                         struct cfs_hash_dhead_dep, dd_head);
        if (dh->dd_tail != NULL) /* not empty */
-               hlist_add_after(dh->dd_tail, hnode);
+               hlist_add_behind(hnode, dh->dd_tail);
        else /* empty list */
                hlist_add_head(hnode, &dh->dd_head);
        dh->dd_tail = hnode;
@@ -418,12 +420,13 @@ cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 }
 
 static int
-cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
 {
-       cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
-                                               cfs_hash_dhead_dep_t, dd_head);
+       struct cfs_hash_dhead_dep *dh;
 
+       dh = container_of(cfs_hash_dd_hhead(hs, bd),
+                         struct cfs_hash_dhead_dep, dd_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
@@ -432,28 +435,28 @@ cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
        return --dh->dd_depth;
 }
 
-static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
+static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
        .hop_hhead      = cfs_hash_hh_hhead,
        .hop_hhead_size = cfs_hash_hh_hhead_size,
        .hop_hnode_add  = cfs_hash_hh_hnode_add,
        .hop_hnode_del  = cfs_hash_hh_hnode_del,
 };
 
-static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
+static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
        .hop_hhead      = cfs_hash_hd_hhead,
        .hop_hhead_size = cfs_hash_hd_hhead_size,
        .hop_hnode_add  = cfs_hash_hd_hnode_add,
        .hop_hnode_del  = cfs_hash_hd_hnode_del,
 };
 
-static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
+static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
        .hop_hhead      = cfs_hash_dh_hhead,
        .hop_hhead_size = cfs_hash_dh_hhead_size,
        .hop_hnode_add  = cfs_hash_dh_hnode_add,
        .hop_hnode_del  = cfs_hash_dh_hnode_del,
 };
 
-static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
+static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
        .hop_hhead      = cfs_hash_dd_hhead,
        .hop_hhead_size = cfs_hash_dd_hhead_size,
        .hop_hnode_add  = cfs_hash_dd_hnode_add,
@@ -461,7 +464,7 @@ static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
 };
 
 static void
-cfs_hash_hlist_setup(cfs_hash_t *hs)
+cfs_hash_hlist_setup(struct cfs_hash *hs)
 {
         if (cfs_hash_with_add_tail(hs)) {
                 hs->hs_hops = cfs_hash_with_depth(hs) ?
@@ -473,8 +476,8 @@ cfs_hash_hlist_setup(cfs_hash_t *hs)
 }
 
 static void
-cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
-                     unsigned int bits, const void *key, cfs_hash_bd_t *bd)
+cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
+                    unsigned int bits, const void *key, struct cfs_hash_bd *bd)
 {
         unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
 
@@ -485,7 +488,7 @@ cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
 }
 
 void
-cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
+cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
 {
         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
         if (likely(hs->hs_rehash_buckets == NULL)) {
@@ -500,7 +503,7 @@ cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
 EXPORT_SYMBOL(cfs_hash_bd_get);
 
 static inline void
-cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
+cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
 {
         if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
                 return;
@@ -523,7 +526,7 @@ cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
 }
 
 void
-cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        struct hlist_node *hnode)
 {
        int rc;
@@ -543,7 +546,7 @@ cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
 
 void
-cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                       struct hlist_node *hnode)
 {
        hs->hs_hops->hop_hnode_del(hs, bd, hnode);
@@ -564,11 +567,11 @@ cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
 
 void
-cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
-                       cfs_hash_bd_t *bd_new, struct hlist_node *hnode)
+cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
+                       struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
 {
-        cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
-        cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
+       struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
+       struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
         int                rc;
 
         if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
@@ -603,7 +606,7 @@ enum {
         CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
 };
 
-typedef enum cfs_hash_lookup_intent {
+enum cfs_hash_lookup_intent {
         /** return item w/o refcount */
         CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
         /** return item with refcount */
@@ -618,12 +621,12 @@ typedef enum cfs_hash_lookup_intent {
         /** delete if existed */
         CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
                                        CFS_HS_LOOKUP_MASK_DEL)
-} cfs_hash_lookup_intent_t;
+};
 
 static struct hlist_node *
-cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key, struct hlist_node *hnode,
-                         cfs_hash_lookup_intent_t intent)
+                         enum cfs_hash_lookup_intent intent)
 
 {
        struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
@@ -662,7 +665,8 @@ cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 }
 
 struct hlist_node *
-cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
+cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                         const void *key)
 {
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                        CFS_HS_LOOKUP_IT_FIND);
@@ -670,7 +674,8 @@ cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
 EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
 
 struct hlist_node *
-cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
+cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                       const void *key)
 {
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                        CFS_HS_LOOKUP_IT_PEEK);
@@ -678,7 +683,7 @@ cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
 EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
 
 struct hlist_node *
-cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                           const void *key, struct hlist_node *hnode,
                            int noref)
 {
@@ -689,7 +694,7 @@ cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
 
 struct hlist_node *
-cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                           const void *key, struct hlist_node *hnode)
 {
        /* hnode can be NULL, we find the first item with @key */
@@ -699,10 +704,10 @@ cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
 
 static void
-cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                        unsigned n, int excl)
 {
-        cfs_hash_bucket_t *prev = NULL;
+       struct cfs_hash_bucket *prev = NULL;
         int                i;
 
         /**
@@ -722,10 +727,10 @@ cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
 }
 
 static void
-cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                          unsigned n, int excl)
 {
-        cfs_hash_bucket_t *prev = NULL;
+       struct cfs_hash_bucket *prev = NULL;
         int                i;
 
         cfs_hash_for_each_bd(bds, n, i) {
@@ -737,7 +742,7 @@ cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
 }
 
 static struct hlist_node *
-cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                unsigned n, const void *key)
 {
        struct hlist_node *ehnode;
@@ -753,8 +758,8 @@ cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
 }
 
 static struct hlist_node *
-cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
-                                cfs_hash_bd_t *bds, unsigned n, const void *key,
+cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
+                                unsigned n, const void *key,
                                 struct hlist_node *hnode, int noref)
 {
        struct hlist_node *ehnode;
@@ -774,7 +779,7 @@ cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
         if (i == 1) { /* only one bucket */
                 cfs_hash_bd_add_locked(hs, &bds[0], hnode);
         } else {
-                cfs_hash_bd_t      mybd;
+               struct cfs_hash_bd      mybd;
 
                 cfs_hash_bd_get(hs, key, &mybd);
                 cfs_hash_bd_add_locked(hs, &mybd, hnode);
@@ -784,7 +789,7 @@ cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
 }
 
 static struct hlist_node *
-cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode)
 {
@@ -801,7 +806,7 @@ cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
 }
 
 static void
-cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
+cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
 {
         int     rc;
 
@@ -819,7 +824,7 @@ cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
                 bd2->bd_bucket = NULL;
 
         } else if (rc > 0) { /* swab bd1 and bd2 */
-                cfs_hash_bd_t tmp;
+               struct cfs_hash_bd tmp;
 
                 tmp = *bd2;
                 *bd2 = *bd1;
@@ -828,7 +833,8 @@ cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
 }
 
 void
-cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
+cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
+                    struct cfs_hash_bd *bds)
 {
         /* NB: caller should hold hs_lock.rw if REHASH is set */
         cfs_hash_bd_from_key(hs, hs->hs_buckets,
@@ -848,21 +854,21 @@ cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
 EXPORT_SYMBOL(cfs_hash_dual_bd_get);
 
 void
-cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
+cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
 {
         cfs_hash_multi_bd_lock(hs, bds, 2, excl);
 }
 EXPORT_SYMBOL(cfs_hash_dual_bd_lock);
 
 void
-cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
+cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
 {
         cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
 }
 EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
 
 struct hlist_node *
-cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key)
 {
         return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
@@ -870,7 +876,7 @@ cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
 EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
 
 struct hlist_node *
-cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode,
                                int noref)
 {
@@ -880,7 +886,7 @@ cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
 EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
 
 struct hlist_node *
-cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode)
 {
        return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
@@ -888,7 +894,7 @@ cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
 EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
 
 static void
-cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
+cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
                       int bkt_size, int prev_size, int size)
 {
         int     i;
@@ -906,11 +912,11 @@ cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
  * needed, the newly allocated buckets if allocation was needed and
  * successful, and NULL on error.
  */
-static cfs_hash_bucket_t **
-cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
+static struct cfs_hash_bucket **
+cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
                          unsigned int old_size, unsigned int new_size)
 {
-        cfs_hash_bucket_t **new_bkts;
+       struct cfs_hash_bucket **new_bkts;
         int                 i;
 
         LASSERT(old_size == 0 || old_bkts != NULL);
@@ -929,7 +935,7 @@ cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
 
        for (i = old_size; i < new_size; i++) {
                struct hlist_head *hhead;
-               cfs_hash_bd_t     bd;
+               struct cfs_hash_bd     bd;
 
                 LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
                 if (new_bkts[i] == NULL) {
@@ -973,7 +979,7 @@ static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
 static int cfs_hash_dep_print(cfs_workitem_t *wi)
 {
-       cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
+       struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
        int         dep;
        int         bkt;
        int         off;
@@ -994,13 +1000,13 @@ static int cfs_hash_dep_print(cfs_workitem_t *wi)
        return 0;
 }
 
-static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
+static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
 {
        spin_lock_init(&hs->hs_dep_lock);
        cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
 }
 
-static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
+static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
 {
        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
                return;
@@ -1016,18 +1022,18 @@ static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
 
 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
 
-static inline void cfs_hash_depth_wi_init(cfs_hash_t *hs) {}
-static inline void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) {}
+static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
+static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
 
 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
 
-cfs_hash_t *
+struct cfs_hash *
 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                 unsigned bkt_bits, unsigned extra_bytes,
                 unsigned min_theta, unsigned max_theta,
-                cfs_hash_ops_t *ops, unsigned flags)
+               struct cfs_hash_ops *ops, unsigned flags)
 {
-        cfs_hash_t *hs;
+       struct cfs_hash *hs;
         int         len;
 
         ENTRY;
@@ -1057,7 +1063,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
 
         len = (flags & CFS_HASH_BIGNAME) == 0 ?
               CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
-        LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
+       LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
         if (hs == NULL)
                 RETURN(NULL);
 
@@ -1089,7 +1095,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
         if (hs->hs_buckets != NULL)
                 return hs;
 
-        LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
+       LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
         RETURN(NULL);
 }
 EXPORT_SYMBOL(cfs_hash_create);
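For context, a minimal sketch of how a caller sets up and tears down a table with the post-patch API shown above. The bit sizes, theta bounds and flag macros (CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA, CFS_HASH_DEFAULT) are assumptions taken from the libcfs header rather than part of this change, and my_hash_ops is a hypothetical struct cfs_hash_ops table (a full sketch of one follows the lprocfs_jobstats.c hunks further down):

/* Sketch only: assumes the libcfs_hash.h declarations are in scope. */
static struct cfs_hash *my_hash;

static int my_hash_init(void)
{
        my_hash = cfs_hash_create("my_objs",
                                  5,    /* cur_bits: start with 32 buckets */
                                  10,   /* max_bits: allow rehash up to 1024 */
                                  3,    /* bkt_bits: hlist heads per bucket */
                                  0,    /* extra_bytes of per-bucket data */
                                  CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
                                  &my_hash_ops, /* struct cfs_hash_ops, not cfs_hash_ops_t */
                                  CFS_HASH_DEFAULT);
        return my_hash == NULL ? -ENOMEM : 0;
}

static void my_hash_fini(void)
{
        /* drop the creation reference; the table is freed when it reaches 0 */
        cfs_hash_putref(my_hash);
}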
@@ -1098,11 +1104,11 @@ EXPORT_SYMBOL(cfs_hash_create);
  * Cleanup libcfs hash @hs.
  */
 static void
-cfs_hash_destroy(cfs_hash_t *hs)
+cfs_hash_destroy(struct cfs_hash *hs)
 {
        struct hlist_node     *hnode;
        struct hlist_node     *pos;
-       cfs_hash_bd_t         bd;
+       struct cfs_hash_bd         bd;
        int                   i;
        ENTRY;
 
@@ -1154,12 +1160,12 @@ cfs_hash_destroy(cfs_hash_t *hs)
                              0, CFS_HASH_NBKT(hs));
        i = cfs_hash_with_bigname(hs) ?
            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
-       LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));
+       LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
 
        EXIT;
 }
 
-cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
+struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
 {
        if (atomic_inc_not_zero(&hs->hs_refcount))
                return hs;
@@ -1167,7 +1173,7 @@ cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
 }
 EXPORT_SYMBOL(cfs_hash_getref);
 
-void cfs_hash_putref(cfs_hash_t *hs)
+void cfs_hash_putref(struct cfs_hash *hs)
 {
        if (atomic_dec_and_test(&hs->hs_refcount))
                cfs_hash_destroy(hs);
@@ -1175,7 +1181,7 @@ void cfs_hash_putref(cfs_hash_t *hs)
 EXPORT_SYMBOL(cfs_hash_putref);
 
 static inline int
-cfs_hash_rehash_bits(cfs_hash_t *hs)
+cfs_hash_rehash_bits(struct cfs_hash *hs)
 {
         if (cfs_hash_with_no_lock(hs) ||
             !cfs_hash_with_rehash(hs))
@@ -1212,7 +1218,7 @@ cfs_hash_rehash_bits(cfs_hash_t *hs)
  * - too many elements
  */
 static inline int
-cfs_hash_rehash_inline(cfs_hash_t *hs)
+cfs_hash_rehash_inline(struct cfs_hash *hs)
 {
        return !cfs_hash_with_nblk_change(hs) &&
               atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
@@ -1223,9 +1229,9 @@ cfs_hash_rehash_inline(cfs_hash_t *hs)
  * ops->hs_get function will be called when the item is added.
  */
 void
-cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
 {
-        cfs_hash_bd_t   bd;
+       struct cfs_hash_bd   bd;
         int             bits;
 
        LASSERT(hlist_unhashed(hnode));
@@ -1246,11 +1252,11 @@ cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
 EXPORT_SYMBOL(cfs_hash_add);
 
 static struct hlist_node *
-cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
+cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
                     struct hlist_node *hnode, int noref)
 {
        struct hlist_node *ehnode;
-       cfs_hash_bd_t     bds[2];
+       struct cfs_hash_bd     bds[2];
        int               bits = 0;
 
        LASSERT(hlist_unhashed(hnode));
@@ -1278,7 +1284,8 @@ cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
  * Returns 0 on success or -EALREADY on key collisions.
  */
 int
-cfs_hash_add_unique(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
+                   struct hlist_node *hnode)
 {
        return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
               -EALREADY : 0;
@@ -1292,7 +1299,7 @@ EXPORT_SYMBOL(cfs_hash_add_unique);
  * Otherwise ops->hs_get is called on the item which was added.
  */
 void *
-cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
+cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
                        struct hlist_node *hnode)
 {
        hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
@@ -1309,11 +1316,11 @@ EXPORT_SYMBOL(cfs_hash_findadd_unique);
  * on the removed object.
  */
 void *
-cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
 {
         void           *obj  = NULL;
         int             bits = 0;
-        cfs_hash_bd_t   bds[2];
+       struct cfs_hash_bd   bds[2];
 
         cfs_hash_lock(hs, 0);
         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
@@ -1349,7 +1356,7 @@ EXPORT_SYMBOL(cfs_hash_del);
  * will be returned and ops->hs_put is called on the removed object.
  */
 void *
-cfs_hash_del_key(cfs_hash_t *hs, const void *key)
+cfs_hash_del_key(struct cfs_hash *hs, const void *key)
 {
         return cfs_hash_del(hs, key, NULL);
 }
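To make the calling convention of the add/lookup/delete paths concrete, a short usage sketch against the signatures above; struct my_obj and its fields are hypothetical, only the cfs_hash_* calls come from this file:

struct my_obj {
        struct hlist_node       mo_hnode;       /* linkage used by the hash */
        __u64                   mo_key;
        /* payload ... */
};

/* Insert only if the key is not already present. */
static int my_obj_insert(struct cfs_hash *hs, struct my_obj *obj)
{
        /* returns -EALREADY on key collision, 0 otherwise */
        return cfs_hash_add_unique(hs, &obj->mo_key, &obj->mo_hnode);
}

/* Lookup takes a reference via ops->hs_get; the caller must drop it. */
static struct my_obj *my_obj_find(struct cfs_hash *hs, __u64 key)
{
        return cfs_hash_lookup(hs, &key);
}

/* Remove by key; ops->hs_put has been called on the returned object. */
static struct my_obj *my_obj_remove(struct cfs_hash *hs, __u64 key)
{
        return cfs_hash_del_key(hs, &key);
}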
@@ -1364,11 +1371,11 @@ EXPORT_SYMBOL(cfs_hash_del_key);
  * in the hash @hs NULL is returned.
  */
 void *
-cfs_hash_lookup(cfs_hash_t *hs, const void *key)
+cfs_hash_lookup(struct cfs_hash *hs, const void *key)
 {
         void                 *obj = NULL;
        struct hlist_node     *hnode;
-        cfs_hash_bd_t         bds[2];
+       struct cfs_hash_bd         bds[2];
 
         cfs_hash_lock(hs, 0);
         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
@@ -1385,7 +1392,7 @@ cfs_hash_lookup(cfs_hash_t *hs, const void *key)
 EXPORT_SYMBOL(cfs_hash_lookup);
 
 static void
-cfs_hash_for_each_enter(cfs_hash_t *hs)
+cfs_hash_for_each_enter(struct cfs_hash *hs)
 {
         LASSERT(!cfs_hash_is_exiting(hs));
 
@@ -1394,7 +1401,7 @@ cfs_hash_for_each_enter(cfs_hash_t *hs)
         /*
          * NB: it's race on cfs_has_t::hs_iterating, but doesn't matter
          * because it's just an unreliable signal to rehash-thread,
-         * rehash-thread will try to finsih rehash ASAP when seeing this.
+        * rehash-thread will try to finish rehash ASAP when seeing this.
          */
         hs->hs_iterating = 1;
 
@@ -1402,7 +1409,7 @@ cfs_hash_for_each_enter(cfs_hash_t *hs)
         hs->hs_iterators++;
 
         /* NB: iteration is mostly called by service thread,
-         * we tend to cancel pending rehash-requst, instead of
+        * we tend to cancel pending rehash-request, instead of
          * blocking service thread, we will relaunch rehash request
          * after iteration */
         if (cfs_hash_is_rehashing(hs))
@@ -1411,7 +1418,7 @@ cfs_hash_for_each_enter(cfs_hash_t *hs)
 }
 
 static void
-cfs_hash_for_each_exit(cfs_hash_t *hs)
+cfs_hash_for_each_exit(struct cfs_hash *hs)
 {
        int remained;
        int bits;
@@ -1442,12 +1449,12 @@ cfs_hash_for_each_exit(cfs_hash_t *hs)
  *      cfs_hash_bd_del_locked
  */
 static __u64
-cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
+cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
                        void *data, int remove_safe)
 {
        struct hlist_node       *hnode;
        struct hlist_node       *pos;
-       cfs_hash_bd_t           bd;
+       struct cfs_hash_bd      bd;
        __u64                   count = 0;
        int                     excl  = !!remove_safe;
        int                     loop  = 0;
@@ -1495,16 +1502,16 @@ cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
        RETURN(count);
 }
 
-typedef struct {
+struct cfs_hash_cond_arg {
         cfs_hash_cond_opt_cb_t  func;
         void                   *arg;
-} cfs_hash_cond_arg_t;
+};
 
 static int
-cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode, void *data)
 {
-        cfs_hash_cond_arg_t *cond = data;
+       struct cfs_hash_cond_arg *cond = data;
 
         if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
                 cfs_hash_bd_del_locked(hs, bd, hnode);
@@ -1517,9 +1524,9 @@ cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
  * any object be reference.
  */
 void
-cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
+cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
 {
-        cfs_hash_cond_arg_t arg = {
+       struct cfs_hash_cond_arg arg = {
                 .func   = func,
                 .arg    = data,
         };
@@ -1529,7 +1536,7 @@ cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
 EXPORT_SYMBOL(cfs_hash_cond_del);
 
 void
-cfs_hash_for_each(cfs_hash_t *hs,
+cfs_hash_for_each(struct cfs_hash *hs,
                   cfs_hash_for_each_cb_t func, void *data)
 {
         cfs_hash_for_each_tight(hs, func, data, 0);
@@ -1537,7 +1544,7 @@ cfs_hash_for_each(cfs_hash_t *hs,
 EXPORT_SYMBOL(cfs_hash_for_each);
 
 void
-cfs_hash_for_each_safe(cfs_hash_t *hs,
+cfs_hash_for_each_safe(struct cfs_hash *hs,
                        cfs_hash_for_each_cb_t func, void *data)
 {
         cfs_hash_for_each_tight(hs, func, data, 1);
@@ -1545,7 +1552,7 @@ cfs_hash_for_each_safe(cfs_hash_t *hs,
 EXPORT_SYMBOL(cfs_hash_for_each_safe);
 
 static int
-cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
              struct hlist_node *hnode, void *data)
 {
        *(int *)data = 0;
@@ -1553,7 +1560,7 @@ cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 }
 
 int
-cfs_hash_is_empty(cfs_hash_t *hs)
+cfs_hash_is_empty(struct cfs_hash *hs)
 {
         int empty = 1;
 
@@ -1563,7 +1570,7 @@ cfs_hash_is_empty(cfs_hash_t *hs)
 EXPORT_SYMBOL(cfs_hash_is_empty);
 
 __u64
-cfs_hash_size_get(cfs_hash_t *hs)
+cfs_hash_size_get(struct cfs_hash *hs)
 {
        return cfs_hash_with_counter(hs) ?
               atomic_read(&hs->hs_count) :
@@ -1587,23 +1594,24 @@ EXPORT_SYMBOL(cfs_hash_size_get);
  * two cases, so iteration has to be stopped on change.
  */
 static int
-cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
+cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
+                       void *data)
 {
-       struct hlist_node *hnode;
-       struct hlist_node *tmp;
-        cfs_hash_bd_t     bd;
-        __u32             version;
-        int               count = 0;
-        int               stop_on_change;
-        int               rc;
-        int               i;
-        ENTRY;
+       struct hlist_node       *hnode;
+       struct hlist_node       *tmp;
+       struct cfs_hash_bd      bd;
+       __u32                   version;
+       int                     count = 0;
+       int                     stop_on_change;
+       int                     rc;
+       int                     i;
+       ENTRY;
 
-        stop_on_change = cfs_hash_with_rehash_key(hs) ||
-                         !cfs_hash_with_no_itemref(hs) ||
-                         CFS_HOP(hs, put_locked) == NULL;
-        cfs_hash_lock(hs, 0);
-        LASSERT(!cfs_hash_is_rehashing(hs));
+       stop_on_change = cfs_hash_with_rehash_key(hs) ||
+                        !cfs_hash_with_no_itemref(hs) ||
+                        hs->hs_ops->hs_put_locked == NULL;
+       cfs_hash_lock(hs, 0);
+       LASSERT(!cfs_hash_is_rehashing(hs));
 
        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;
@@ -1649,7 +1657,7 @@ cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
 }
 
 int
-cfs_hash_for_each_nolock(cfs_hash_t *hs,
+cfs_hash_for_each_nolock(struct cfs_hash *hs,
                          cfs_hash_for_each_cb_t func, void *data)
 {
         ENTRY;
@@ -1659,10 +1667,10 @@ cfs_hash_for_each_nolock(cfs_hash_t *hs,
             !cfs_hash_with_no_itemref(hs))
                 RETURN(-EOPNOTSUPP);
 
-        if (CFS_HOP(hs, get) == NULL ||
-            (CFS_HOP(hs, put) == NULL &&
-             CFS_HOP(hs, put_locked) == NULL))
-                RETURN(-EOPNOTSUPP);
+       if (hs->hs_ops->hs_get == NULL ||
+          (hs->hs_ops->hs_put == NULL &&
+           hs->hs_ops->hs_put_locked == NULL))
+               RETURN(-EOPNOTSUPP);
 
         cfs_hash_for_each_enter(hs);
         cfs_hash_for_each_relax(hs, func, data);
@@ -1684,7 +1692,7 @@ EXPORT_SYMBOL(cfs_hash_for_each_nolock);
  * the required locking is in place to prevent concurrent insertions.
  */
 int
-cfs_hash_for_each_empty(cfs_hash_t *hs,
+cfs_hash_for_each_empty(struct cfs_hash *hs,
                         cfs_hash_for_each_cb_t func, void *data)
 {
         unsigned  i = 0;
@@ -1693,10 +1701,10 @@ cfs_hash_for_each_empty(cfs_hash_t *hs,
         if (cfs_hash_with_no_lock(hs))
                 return -EOPNOTSUPP;
 
-        if (CFS_HOP(hs, get) == NULL ||
-            (CFS_HOP(hs, put) == NULL &&
-             CFS_HOP(hs, put_locked) == NULL))
-                return -EOPNOTSUPP;
+       if (hs->hs_ops->hs_get == NULL ||
+          (hs->hs_ops->hs_put == NULL &&
+           hs->hs_ops->hs_put_locked == NULL))
+               return -EOPNOTSUPP;
 
         cfs_hash_for_each_enter(hs);
         while (cfs_hash_for_each_relax(hs, func, data)) {
@@ -1709,12 +1717,12 @@ cfs_hash_for_each_empty(cfs_hash_t *hs,
 EXPORT_SYMBOL(cfs_hash_for_each_empty);
 
 void
-cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
+cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
                        cfs_hash_for_each_cb_t func, void *data)
 {
        struct hlist_head *hhead;
        struct hlist_node *hnode;
-       cfs_hash_bd_t      bd;
+       struct cfs_hash_bd         bd;
 
         cfs_hash_for_each_enter(hs);
         cfs_hash_lock(hs, 0);
@@ -1744,11 +1752,11 @@ EXPORT_SYMBOL(cfs_hash_hlist_for_each);
  * is held so the callback must never sleep.
    */
 void
-cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
+cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
                        cfs_hash_for_each_cb_t func, void *data)
 {
        struct hlist_node *hnode;
-       cfs_hash_bd_t      bds[2];
+       struct cfs_hash_bd         bds[2];
        unsigned           i;
 
        cfs_hash_lock(hs, 0);
@@ -1785,7 +1793,7 @@ EXPORT_SYMBOL(cfs_hash_for_each_key);
  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
  */
 void
-cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
+cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
 {
         int     i;
 
@@ -1814,7 +1822,7 @@ cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
 EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
 
 void
-cfs_hash_rehash_cancel(cfs_hash_t *hs)
+cfs_hash_rehash_cancel(struct cfs_hash *hs)
 {
         cfs_hash_lock(hs, 1);
         cfs_hash_rehash_cancel_locked(hs);
@@ -1823,7 +1831,7 @@ cfs_hash_rehash_cancel(cfs_hash_t *hs)
 EXPORT_SYMBOL(cfs_hash_rehash_cancel);
 
 int
-cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
+cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
 {
         int     rc;
 
@@ -1853,9 +1861,9 @@ cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
 EXPORT_SYMBOL(cfs_hash_rehash);
 
 static int
-cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
+cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
 {
-       cfs_hash_bd_t      new;
+       struct cfs_hash_bd      new;
        struct hlist_head *hhead;
        struct hlist_node *hnode;
        struct hlist_node *pos;
@@ -1885,17 +1893,18 @@ cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
 static int
 cfs_hash_rehash_worker(cfs_workitem_t *wi)
 {
-        cfs_hash_t         *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
-        cfs_hash_bucket_t **bkts;
-        cfs_hash_bd_t       bd;
-        unsigned int        old_size;
-        unsigned int        new_size;
-        int                 bsize;
-        int                 count = 0;
-        int                 rc = 0;
-        int                 i;
+       struct cfs_hash         *hs =
+               container_of(wi, struct cfs_hash, hs_rehash_wi);
+       struct cfs_hash_bucket **bkts;
+       struct cfs_hash_bd      bd;
+       unsigned int            old_size;
+       unsigned int            new_size;
+       int                     bsize;
+       int                     count = 0;
+       int                     rc = 0;
+       int                     i;
 
-        LASSERT (hs != NULL && cfs_hash_with_rehash(hs));
+       LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
 
         cfs_hash_lock(hs, 0);
         LASSERT(cfs_hash_is_rehashing(hs));
@@ -1977,7 +1986,7 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi)
         if (bkts != NULL)
                 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
         if (rc != 0)
-                CDEBUG(D_INFO, "early quit of of rehashing: %d\n", rc);
+               CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
        /* return 1 only if cfs_wi_exit is called */
        return rc == -ESRCH;
 }
@@ -1992,12 +2001,12 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi)
  * the registered cfs_hash_get() and cfs_hash_put() functions will
  * not be called.
  */
-void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
+void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
                         void *new_key, struct hlist_node *hnode)
 {
-        cfs_hash_bd_t        bds[3];
-        cfs_hash_bd_t        old_bds[2];
-        cfs_hash_bd_t        new_bd;
+       struct cfs_hash_bd        bds[3];
+       struct cfs_hash_bd        old_bds[2];
+       struct cfs_hash_bd        new_bd;
 
        LASSERT(!hlist_unhashed(hnode));
 
@@ -2040,8 +2049,8 @@ int cfs_hash_debug_header(struct seq_file *m)
 }
 EXPORT_SYMBOL(cfs_hash_debug_header);
 
-static cfs_hash_bucket_t **
-cfs_hash_full_bkts(cfs_hash_t *hs)
+static struct cfs_hash_bucket **
+cfs_hash_full_bkts(struct cfs_hash *hs)
 {
         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
         if (hs->hs_rehash_buckets == NULL)
@@ -2053,7 +2062,7 @@ cfs_hash_full_bkts(cfs_hash_t *hs)
 }
 
 static unsigned int
-cfs_hash_full_nbkt(cfs_hash_t *hs)
+cfs_hash_full_nbkt(struct cfs_hash *hs)
 {
         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
         if (hs->hs_rehash_buckets == NULL)
@@ -2064,7 +2073,7 @@ cfs_hash_full_nbkt(cfs_hash_t *hs)
                CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
 }
 
-int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m)
+int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
 {
        int     dist[8] = { 0, };
        int     maxdep  = -1;
@@ -2104,15 +2113,13 @@ int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m)
         * Non-Uniform hash distribution:       128/125/0/0/0/0/2/1
         */
        for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
-               cfs_hash_bd_t bd;
+               struct cfs_hash_bd bd;
 
                bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
                cfs_hash_bd_lock(hs, &bd, 0);
                if (maxdep < bd.bd_bucket->hsb_depmax) {
                        maxdep  = bd.bd_bucket->hsb_depmax;
-#ifdef __KERNEL__
                        maxdepb = ffz(~maxdep);
-#endif
                }
                total += bd.bd_bucket->hsb_count;
                dist[min(fls(bd.bd_bucket->hsb_count/max(theta,1)),7)]++;
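The iteration callbacks converted throughout the per-subsystem hunks below all share the cfs_hash_for_each_cb_t shape used here; a minimal sketch, reusing the hypothetical struct my_obj from the earlier example:

/* Count objects in the table; matches cfs_hash_for_each_cb_t. */
static int my_count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                       struct hlist_node *hnode, void *data)
{
        struct my_obj *obj = cfs_hash_object(hs, hnode);
        int *count = data;

        (void)obj;      /* inspect the object here if needed */
        (*count)++;
        return 0;       /* keep iterating */
}

static int my_obj_count(struct cfs_hash *hs)
{
        int count = 0;

        cfs_hash_for_each(hs, my_count_cb, &count);
        return count;
}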
diff --git a/lustre/include/lprocfs_status.h b/lustre/include/lprocfs_status.h
index 8d47825..f12dd93 100644 (file)
@@ -386,7 +386,7 @@ static inline void s2dhms(struct dhms *ts, time_t secs)
 typedef void (*cntr_init_callback)(struct lprocfs_stats *stats);
 
 struct obd_job_stats {
-       cfs_hash_t             *ojs_hash;
+       struct cfs_hash        *ojs_hash;
        struct list_head        ojs_list;
        rwlock_t                ojs_lock; /* protect the obj_list */
        cntr_init_callback      ojs_cntr_init_fn;
diff --git a/lustre/include/lu_object.h b/lustre/include/lu_object.h
index d67d8fc..39d9d22 100644 (file)
@@ -645,7 +645,7 @@ struct lu_site {
         /**
          * objects hash table
          */
-       cfs_hash_t              *ls_obj_hash;
+       struct cfs_hash         *ls_obj_hash;
         /**
          * index of bucket on hash table while purging
          */
@@ -689,7 +689,7 @@ struct lu_site {
 static inline struct lu_site_bkt_data *
 lu_site_bkt_from_fid(struct lu_site *site, struct lu_fid *fid)
 {
-        cfs_hash_bd_t bd;
+       struct cfs_hash_bd bd;
 
         cfs_hash_bd_get(site->ls_obj_hash, fid, &bd);
         return cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
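The lu_site helper above is the usual pattern for per-bucket private data: the table is created with a non-zero extra_bytes and the per-bucket area is reached through a bucket descriptor. A hedged sketch with invented names (my_bkt_data, my_bkt_from_key); only cfs_hash_bd_get() and cfs_hash_bd_extra_get() are taken from the source:

struct my_bkt_data {
        struct list_head        mbd_lru;        /* per-bucket private state */
};

/* Assumes the hash was created with extra_bytes = sizeof(struct my_bkt_data). */
static struct my_bkt_data *my_bkt_from_key(struct cfs_hash *hs, const void *key)
{
        struct cfs_hash_bd bd;

        cfs_hash_bd_get(hs, key, &bd);
        return cfs_hash_bd_extra_get(hs, &bd);
}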
diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h
index 192bb84..4d64b19 100644 (file)
@@ -376,7 +376,7 @@ struct ldlm_namespace {
        ldlm_side_t             ns_client;
 
        /** Resource hash table for namespace. */
-       cfs_hash_t              *ns_rs_hash;
+       struct cfs_hash         *ns_rs_hash;
 
        /** serialize */
        spinlock_t              ns_lock;
diff --git a/lustre/include/lustre_export.h b/lustre/include/lustre_export.h
index 851edca..d9af664 100644 (file)
@@ -202,12 +202,12 @@ struct obd_export {
        /** Connection count value from last successful reconnect rpc */
        __u32                     exp_conn_cnt;
        /** Hash list of all ldlm locks granted on this export */
-       cfs_hash_t               *exp_lock_hash;
+       struct cfs_hash          *exp_lock_hash;
        /**
         * Hash list for Posix lock deadlock detection, added with
         * ldlm_lock::l_exp_flock_hash.
         */
-       cfs_hash_t             *exp_flock_hash;
+       struct cfs_hash        *exp_flock_hash;
        struct list_head        exp_outstanding_replies;
        struct list_head        exp_uncommitted_replies;
        spinlock_t              exp_uncommitted_replies_lock;
diff --git a/lustre/include/lustre_nodemap.h b/lustre/include/lustre_nodemap.h
index 2e0cf6b..53686cc 100644 (file)
@@ -84,7 +84,7 @@ struct lu_nodemap {
        /* proc directory entry */
        struct proc_dir_entry   *nm_proc_entry;
        /* attached client members of this nodemap */
-       cfs_hash_t              *nm_member_hash;
+       struct cfs_hash         *nm_member_hash;
        /* access by nodemap name */
        struct hlist_node       nm_hash;
 };
diff --git a/lustre/include/lustre_nrs_crr.h b/lustre/include/lustre_nrs_crr.h
index de52699..277e902 100644 (file)
@@ -45,7 +45,7 @@
 struct nrs_crrn_net {
        struct ptlrpc_nrs_resource      cn_res;
        cfs_binheap_t                  *cn_binheap;
-       cfs_hash_t                     *cn_cli_hash;
+       struct cfs_hash                *cn_cli_hash;
        /**
         * Used when a new scheduling round commences, in order to synchronize
         * all clients with the new round number.
diff --git a/lustre/include/lustre_nrs_orr.h b/lustre/include/lustre_nrs_orr.h
index 13f3794..eb69fa8 100644 (file)
@@ -107,7 +107,7 @@ struct nrs_orr_key {
 struct nrs_orr_data {
        struct ptlrpc_nrs_resource      od_res;
        cfs_binheap_t                  *od_binheap;
-       cfs_hash_t                     *od_obj_hash;
+       struct cfs_hash                *od_obj_hash;
        struct kmem_cache              *od_cache;
        /**
         * Used when a new scheduling round commences, in order to synchronize
diff --git a/lustre/include/lustre_nrs_tbf.h b/lustre/include/lustre_nrs_tbf.h
index c02620f..b36b549 100644 (file)
@@ -200,7 +200,7 @@ struct nrs_tbf_head {
        /**
         * Hash of clients.
         */
-       cfs_hash_t                      *th_cli_hash;
+       struct cfs_hash                 *th_cli_hash;
        /**
         * Type of TBF policy.
         */
diff --git a/lustre/include/obd.h b/lustre/include/obd.h
index ce5457f..f515f7a 100644 (file)
@@ -294,7 +294,7 @@ struct client_obd {
        void                    *cl_writeback_work;
        void                    *cl_lru_work;
        /* hash tables for osc_quota_info */
-       cfs_hash_t              *cl_quota_hash[MAXQUOTAS];
+       struct cfs_hash         *cl_quota_hash[MAXQUOTAS];
 };
 #define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid)
 
@@ -348,7 +348,7 @@ struct lov_obd {
        __u32                   lov_tgt_size;   /* size of tgts array */
        int                     lov_connects;
        int                     lov_pool_count;
-       cfs_hash_t             *lov_pools_hash_body; /* used for key access */
+       struct cfs_hash        *lov_pools_hash_body; /* used for key access */
        struct list_head        lov_pool_list;  /* used for sequential access */
        struct proc_dir_entry  *lov_pool_proc_entry;
        enum lustre_sec_part    lov_sp_me;
@@ -540,11 +540,11 @@ struct obd_device {
          * protection of other bits using _bh lock */
         unsigned long obd_recovery_expired:1;
         /* uuid-export hash body */
-        cfs_hash_t             *obd_uuid_hash;
+       struct cfs_hash             *obd_uuid_hash;
         /* nid-export hash body */
-        cfs_hash_t             *obd_nid_hash;
+       struct cfs_hash             *obd_nid_hash;
        /* nid stats body */
-       cfs_hash_t             *obd_nid_stats_hash;
+       struct cfs_hash             *obd_nid_stats_hash;
        struct list_head        obd_nid_stats;
        atomic_t                obd_refcount;
        struct list_head        obd_exports;
diff --git a/lustre/ldlm/ldlm_flock.c b/lustre/ldlm/ldlm_flock.c
index ac17c1a..354c1fb 100644 (file)
@@ -170,7 +170,7 @@ struct ldlm_flock_lookup_cb_data {
        struct obd_export *exp;
 };
 
-static int ldlm_flock_lookup_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int ldlm_flock_lookup_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                struct hlist_node *hnode, void *data)
 {
        struct ldlm_flock_lookup_cb_data *cb_data = data;
@@ -864,7 +864,7 @@ void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
  * Export handle<->flock hash operations.
  */
 static unsigned
-ldlm_export_flock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+ldlm_export_flock_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_u64_hash(*(__u64 *)key, mask);
 }
@@ -891,7 +891,7 @@ ldlm_export_flock_object(struct hlist_node *hnode)
 }
 
 static void
-ldlm_export_flock_get(cfs_hash_t *hs, struct hlist_node *hnode)
+ldlm_export_flock_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ldlm_lock *lock;
        struct ldlm_flock *flock;
@@ -906,7 +906,7 @@ ldlm_export_flock_get(cfs_hash_t *hs, struct hlist_node *hnode)
 }
 
 static void
-ldlm_export_flock_put(cfs_hash_t *hs, struct hlist_node *hnode)
+ldlm_export_flock_put(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ldlm_lock *lock;
        struct ldlm_flock *flock;
@@ -923,7 +923,7 @@ ldlm_export_flock_put(cfs_hash_t *hs, struct hlist_node *hnode)
        }
 }
 
-static cfs_hash_ops_t ldlm_export_flock_ops = {
+static struct cfs_hash_ops ldlm_export_flock_ops = {
        .hs_hash        = ldlm_export_flock_hash,
        .hs_key         = ldlm_export_flock_key,
        .hs_keycmp      = ldlm_export_flock_keycmp,
diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index f009bfa..f9e72bd 100644 (file)
@@ -2082,7 +2082,7 @@ static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
         return LDLM_ITER_CONTINUE;
 }
 
-static int ldlm_reprocess_res(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int ldlm_reprocess_res(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                              struct hlist_node *hnode, void *arg)
 {
         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
@@ -2264,8 +2264,9 @@ struct export_cl_data {
  * Iterator function for ldlm_cancel_locks_for_export.
  * Cancels passed locks.
  */
-static int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                          struct hlist_node *hnode, void *data)
+static int
+ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                               struct hlist_node *hnode, void *data)
 
 {
        struct export_cl_data   *ecl = (struct export_cl_data *)data;
diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c
index 4c70b58..ee22e75 100644 (file)
@@ -2434,7 +2434,7 @@ static int ldlm_hpreq_handler(struct ptlrpc_request *req)
         RETURN(0);
 }
 
-static int ldlm_revoke_lock_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int ldlm_revoke_lock_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                               struct hlist_node *hnode, void *data)
 
 {
@@ -2679,7 +2679,7 @@ void ldlm_put_ref(void)
  * Export handle<->lock hash operations.
  */
 static unsigned
-ldlm_export_lock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+ldlm_export_lock_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
         return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
 }
@@ -2715,7 +2715,7 @@ ldlm_export_lock_object(struct hlist_node *hnode)
 }
 
 static void
-ldlm_export_lock_get(cfs_hash_t *hs, struct hlist_node *hnode)
+ldlm_export_lock_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         struct ldlm_lock *lock;
 
@@ -2724,7 +2724,7 @@ ldlm_export_lock_get(cfs_hash_t *hs, struct hlist_node *hnode)
 }
 
 static void
-ldlm_export_lock_put(cfs_hash_t *hs, struct hlist_node *hnode)
+ldlm_export_lock_put(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         struct ldlm_lock *lock;
 
@@ -2732,7 +2732,7 @@ ldlm_export_lock_put(cfs_hash_t *hs, struct hlist_node *hnode)
         LDLM_LOCK_RELEASE(lock);
 }
 
-static cfs_hash_ops_t ldlm_export_lock_ops = {
+static struct cfs_hash_ops ldlm_export_lock_ops = {
         .hs_hash        = ldlm_export_lock_hash,
         .hs_key         = ldlm_export_lock_key,
         .hs_keycmp      = ldlm_export_lock_keycmp,
diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index 684062f..bc81406 100644 (file)
@@ -1952,8 +1952,9 @@ struct ldlm_cli_cancel_arg {
         void   *lc_opaque;
 };
 
-static int ldlm_cli_hash_cancel_unused(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                      struct hlist_node *hnode, void *arg)
+static int
+ldlm_cli_hash_cancel_unused(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                           struct hlist_node *hnode, void *arg)
 {
        struct ldlm_resource           *res = cfs_hash_object(hs, hnode);
        struct ldlm_cli_cancel_arg     *lc = arg;
@@ -2047,7 +2048,7 @@ static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
         return helper->iter(lock, helper->closure);
 }
 
-static int ldlm_res_iter_helper(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int ldlm_res_iter_helper(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                struct hlist_node *hnode, void *arg)
 
 {
diff --git a/lustre/ldlm/ldlm_resource.c b/lustre/ldlm/ldlm_resource.c
index bc430a9..3dfde84 100644 (file)
@@ -162,7 +162,7 @@ static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
 {
        struct ldlm_namespace   *ns  = m->private;
        __u64                   res = 0;
-       cfs_hash_bd_t           bd;
+       struct cfs_hash_bd              bd;
        int                     i;
 
        /* result is not strictly consistant */
@@ -390,7 +390,7 @@ static int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
 
 #endif /* CONFIG_PROC_FS */
 
-static unsigned ldlm_res_hop_hash(cfs_hash_t *hs,
+static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
                                   const void *key, unsigned mask)
 {
         const struct ldlm_res_id     *id  = key;
@@ -402,7 +402,7 @@ static unsigned ldlm_res_hop_hash(cfs_hash_t *hs,
         return val & mask;
 }
 
-static unsigned ldlm_res_hop_fid_hash(cfs_hash_t *hs,
+static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
                                       const void *key, unsigned mask)
 {
         const struct ldlm_res_id *id = key;
@@ -454,7 +454,8 @@ static void *ldlm_res_hop_object(struct hlist_node *hnode)
        return hlist_entry(hnode, struct ldlm_resource, lr_hash);
 }
 
-static void ldlm_res_hop_get_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void
+ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         struct ldlm_resource *res;
 
@@ -462,7 +463,8 @@ static void ldlm_res_hop_get_locked(cfs_hash_t *hs, struct hlist_node *hnode)
         ldlm_resource_getref(res);
 }
 
-static void ldlm_res_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void
+ldlm_res_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         struct ldlm_resource *res;
 
@@ -471,7 +473,7 @@ static void ldlm_res_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
         ldlm_resource_putref_locked(res);
 }
 
-static void ldlm_res_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
+static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         struct ldlm_resource *res;
 
@@ -479,7 +481,7 @@ static void ldlm_res_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
         ldlm_resource_putref(res);
 }
 
-static cfs_hash_ops_t ldlm_ns_hash_ops = {
+static struct cfs_hash_ops ldlm_ns_hash_ops = {
         .hs_hash        = ldlm_res_hop_hash,
         .hs_key         = ldlm_res_hop_key,
         .hs_keycmp      = ldlm_res_hop_keycmp,
@@ -490,7 +492,7 @@ static cfs_hash_ops_t ldlm_ns_hash_ops = {
         .hs_put         = ldlm_res_hop_put
 };
 
-static cfs_hash_ops_t ldlm_ns_fid_hash_ops = {
+static struct cfs_hash_ops ldlm_ns_fid_hash_ops = {
         .hs_hash        = ldlm_res_hop_fid_hash,
         .hs_key         = ldlm_res_hop_key,
         .hs_keycmp      = ldlm_res_hop_keycmp,
@@ -508,7 +510,7 @@ typedef struct {
         /** hash bits */
         unsigned        nsd_all_bits;
         /** hash operations */
-        cfs_hash_ops_t *nsd_hops;
+       struct cfs_hash_ops *nsd_hops;
 } ldlm_ns_hash_def_t;
 
 static ldlm_ns_hash_def_t ldlm_ns_hash_defs[] =
@@ -565,7 +567,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
         struct ldlm_namespace *ns = NULL;
         struct ldlm_ns_bucket *nsb;
         ldlm_ns_hash_def_t    *nsd;
-        cfs_hash_bd_t          bd;
+       struct cfs_hash_bd          bd;
         int                    idx;
         int                    rc;
         ENTRY;
@@ -751,7 +753,7 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
         } while (1);
 }
 
-static int ldlm_resource_clean(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                               struct hlist_node *hnode, void *arg)
 {
         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
@@ -764,7 +766,7 @@ static int ldlm_resource_clean(cfs_hash_t *hs, cfs_hash_bd_t *bd,
         return 0;
 }
 
-static int ldlm_resource_complain(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                  struct hlist_node *hnode, void *arg)
 {
        struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
@@ -1067,7 +1069,7 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
 {
        struct hlist_node       *hnode;
        struct ldlm_resource    *res = NULL;
-       cfs_hash_bd_t           bd;
+       struct cfs_hash_bd              bd;
        __u64                   version;
        int                     ns_refcount = 0;
 
@@ -1147,7 +1149,7 @@ struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
        return res;
 }
 
-static void __ldlm_resource_putref_final(cfs_hash_bd_t *bd,
+static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
                                          struct ldlm_resource *res)
 {
         struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
@@ -1178,7 +1180,7 @@ static void __ldlm_resource_putref_final(cfs_hash_bd_t *bd,
 int ldlm_resource_putref(struct ldlm_resource *res)
 {
        struct ldlm_namespace *ns = ldlm_res_to_ns(res);
-       cfs_hash_bd_t   bd;
+       struct cfs_hash_bd   bd;
 
        LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
        CDEBUG(D_INFO, "putref res: %p count: %d\n",
@@ -1207,7 +1209,7 @@ int ldlm_resource_putref_locked(struct ldlm_resource *res)
               res, atomic_read(&res->lr_refcount) - 1);
 
        if (atomic_dec_and_test(&res->lr_refcount)) {
-               cfs_hash_bd_t bd;
+               struct cfs_hash_bd bd;
 
                cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
                                &res->lr_name, &bd);
@@ -1315,7 +1317,7 @@ void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
        mutex_unlock(ldlm_namespace_lock(client));
 }
 
-static int ldlm_res_hash_dump(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                              struct hlist_node *hnode, void *arg)
 {
         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
diff --git a/lustre/llite/vvp_dev.c b/lustre/llite/vvp_dev.c
index 2ffdd59..288b642 100644 (file)
@@ -423,7 +423,7 @@ static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id)
                 ((__u64)id->vpi_bucket << PGC_OBJ_SHIFT);
 }
 
-static int vvp_pgcache_obj_get(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int vvp_pgcache_obj_get(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                               struct hlist_node *hnode, void *data)
 {
         struct vvp_pgcache_id   *id  = data;
diff --git a/lustre/lod/lod_internal.h b/lustre/lod/lod_internal.h
index 60617cf..6f1bff1 100644 (file)
@@ -200,7 +200,7 @@ struct lod_device {
        /* OST pool data */
        struct ost_pool         lod_pool_info; /* all OSTs in a packed array */
        int                     lod_pool_count;
-       cfs_hash_t             *lod_pools_hash_body; /* used for key access */
+       struct cfs_hash        *lod_pools_hash_body; /* used for key access */
        struct list_head        lod_pool_list; /* used for sequential access */
        struct proc_dir_entry  *lod_pool_proc_entry;
 
@@ -462,7 +462,7 @@ void lod_pool_putref(struct pool_desc *pool);
 int lod_ost_pool_free(struct ost_pool *op);
 int lod_pool_del(struct obd_device *obd, char *poolname);
 int lod_ost_pool_init(struct ost_pool *op, unsigned int count);
-extern cfs_hash_ops_t pool_hash_operations;
+extern struct cfs_hash_ops pool_hash_operations;
 int lod_check_index_in_pool(__u32 idx, struct pool_desc *pool);
 int lod_pool_new(struct obd_device *obd, char *poolname);
 int lod_pool_add(struct obd_device *obd, char *poolname, char *ostname);
diff --git a/lustre/lod/lod_pool.c b/lustre/lod/lod_pool.c
index 19e473f..5caef4c 100644 (file)
@@ -140,7 +140,8 @@ static void pool_putref_locked(struct pool_desc *pool)
  *
  * \retval             computed hash value from \a key and limited by \a mask
  */
-static __u32 pool_hashfn(cfs_hash_t *hash_body, const void *key, unsigned mask)
+static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key,
+                        unsigned mask)
 {
        return cfs_hash_djb2_hash(key, strnlen(key, LOV_MAXPOOLNAME), mask);
 }
@@ -195,7 +196,7 @@ static void *pool_hashobject(struct hlist_node *hnode)
        return hlist_entry(hnode, struct pool_desc, pool_hash);
 }
 
-static void pool_hashrefcount_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void pool_hashrefcount_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct pool_desc *pool;
 
@@ -203,7 +204,7 @@ static void pool_hashrefcount_get(cfs_hash_t *hs, struct hlist_node *hnode)
        pool_getref(pool);
 }
 
-static void pool_hashrefcount_put_locked(cfs_hash_t *hs,
+static void pool_hashrefcount_put_locked(struct cfs_hash *hs,
                                         struct hlist_node *hnode)
 {
        struct pool_desc *pool;
@@ -212,7 +213,7 @@ static void pool_hashrefcount_put_locked(cfs_hash_t *hs,
        pool_putref_locked(pool);
 }
 
-cfs_hash_ops_t pool_hash_operations = {
+struct cfs_hash_ops pool_hash_operations = {
        .hs_hash        = pool_hashfn,
        .hs_key         = pool_key,
        .hs_keycmp      = pool_hashkey_keycmp,
diff --git a/lustre/lov/lov_internal.h b/lustre/lov/lov_internal.h
index 2bd1547..dbf72df 100644 (file)
@@ -277,7 +277,7 @@ extern struct lprocfs_vars lprocfs_lov_obd_vars[];
 extern struct lu_device_type lov_device_type;
 
 /* pools */
-extern cfs_hash_ops_t pool_hash_operations;
+extern struct cfs_hash_ops pool_hash_operations;
 /* ost_pool methods */
 int lov_ost_pool_init(struct ost_pool *op, unsigned int count);
 int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count);
diff --git a/lustre/lov/lov_pool.c b/lustre/lov/lov_pool.c
index b808d58..9b191b6 100644 (file)
@@ -86,7 +86,8 @@ static void lov_pool_putref_locked(struct pool_desc *pool)
  * Chapter 6.4.
  * Addison Wesley, 1973
  */
-static __u32 pool_hashfn(cfs_hash_t *hash_body, const void *key, unsigned mask)
+static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key,
+                        unsigned mask)
 {
         int i;
         __u32 result;
@@ -126,7 +127,7 @@ static void *pool_hashobject(struct hlist_node *hnode)
        return hlist_entry(hnode, struct pool_desc, pool_hash);
 }
 
-static void pool_hashrefcount_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void pool_hashrefcount_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         struct pool_desc *pool;
 
@@ -134,7 +135,7 @@ static void pool_hashrefcount_get(cfs_hash_t *hs, struct hlist_node *hnode)
         lov_pool_getref(pool);
 }
 
-static void pool_hashrefcount_put_locked(cfs_hash_t *hs,
+static void pool_hashrefcount_put_locked(struct cfs_hash *hs,
                                         struct hlist_node *hnode)
 {
         struct pool_desc *pool;
@@ -143,7 +144,7 @@ static void pool_hashrefcount_put_locked(cfs_hash_t *hs,
         lov_pool_putref_locked(pool);
 }
 
-cfs_hash_ops_t pool_hash_operations = {
+struct cfs_hash_ops pool_hash_operations = {
         .hs_hash        = pool_hashfn,
         .hs_key         = pool_key,
         .hs_keycmp      = pool_hashkey_keycmp,
diff --git a/lustre/mdt/mdt_lproc.c b/lustre/mdt/mdt_lproc.c
index 667285d..3fa5883 100644 (file)
@@ -693,8 +693,9 @@ static struct lprocfs_vars lprocfs_mdt_obd_vars[] = {
        { NULL }
 };
 
-static int lprocfs_mdt_print_open_files(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                       struct hlist_node *hnode, void *v)
+static int
+lprocfs_mdt_print_open_files(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                            struct hlist_node *hnode, void *v)
 {
        struct obd_export       *exp = cfs_hash_object(hs, hnode);
        struct seq_file         *seq = v;
diff --git a/lustre/obdclass/cl_object.c b/lustre/obdclass/cl_object.c
index 0874045..ef34ad8 100644 (file)
@@ -671,9 +671,9 @@ static void cl_env_init0(struct cl_env *cle, void *debug)
  * The implementation of using hash table to connect cl_env and thread
  */
 
-static cfs_hash_t *cl_env_hash;
+static struct cfs_hash *cl_env_hash;
 
-static unsigned cl_env_hops_hash(cfs_hash_t *lh,
+static unsigned cl_env_hops_hash(struct cfs_hash *lh,
                                  const void *key, unsigned mask)
 {
 #if BITS_PER_LONG == 64
@@ -699,13 +699,13 @@ static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
         return (key == cle->ce_owner);
 }
 
-static void cl_env_hops_noop(cfs_hash_t *hs, struct hlist_node *hn)
+static void cl_env_hops_noop(struct cfs_hash *hs, struct hlist_node *hn)
 {
        struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
         LASSERT(cle->ce_magic == &cl_env_init0);
 }
 
-static cfs_hash_ops_t cl_env_hops = {
+static struct cfs_hash_ops cl_env_hops = {
         .hs_hash        = cl_env_hops_hash,
         .hs_key         = cl_env_hops_obj,
         .hs_keycmp      = cl_env_hops_keycmp,
diff --git a/lustre/obdclass/genops.c b/lustre/obdclass/genops.c
index 4023a7d..2809176 100644 (file)
@@ -837,7 +837,7 @@ struct obd_export *class_new_export(struct obd_device *obd,
                                     struct obd_uuid *cluuid)
 {
         struct obd_export *export;
-        cfs_hash_t *hash = NULL;
+       struct cfs_hash *hash = NULL;
         int rc = 0;
         ENTRY;
 
@@ -1420,7 +1420,7 @@ EXPORT_SYMBOL(obd_export_nid2str);
 
 int obd_export_evict_by_nid(struct obd_device *obd, const char *nid)
 {
-       cfs_hash_t *nid_hash;
+       struct cfs_hash *nid_hash;
        struct obd_export *doomed_exp = NULL;
        int exports_evicted = 0;
 
@@ -1468,7 +1468,7 @@ EXPORT_SYMBOL(obd_export_evict_by_nid);
 
 int obd_export_evict_by_uuid(struct obd_device *obd, const char *uuid)
 {
-       cfs_hash_t *uuid_hash;
+       struct cfs_hash *uuid_hash;
        struct obd_export *doomed_exp = NULL;
        struct obd_uuid doomed_uuid;
        int exports_evicted = 0;
diff --git a/lustre/obdclass/lprocfs_jobstats.c b/lustre/obdclass/lprocfs_jobstats.c
index 930ca06..6bdf476 100644 (file)
@@ -73,7 +73,8 @@ struct job_stat {
        struct obd_job_stats    *js_jobstats;
 };
 
-static unsigned job_stat_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+static unsigned
+job_stat_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_djb2_hash(key, strlen(key), mask);
 }
@@ -98,7 +99,7 @@ static void *job_stat_object(struct hlist_node *hnode)
        return hlist_entry(hnode, struct job_stat, js_hash);
 }
 
-static void job_stat_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void job_stat_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct job_stat *job;
        job = hlist_entry(hnode, struct job_stat, js_hash);
@@ -125,19 +126,19 @@ static void job_putref(struct job_stat *job)
                job_free(job);
 }
 
-static void job_stat_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void job_stat_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct job_stat *job;
        job = hlist_entry(hnode, struct job_stat, js_hash);
        job_putref(job);
 }
 
-static void job_stat_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+static void job_stat_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        CERROR("should not have any items\n");
 }
 
-static cfs_hash_ops_t job_stats_hash_ops = {
+static struct cfs_hash_ops job_stats_hash_ops = {
        .hs_hash       = job_stat_hash,
        .hs_key        = job_stat_key,
        .hs_keycmp     = job_stat_keycmp,
@@ -147,7 +148,7 @@ static cfs_hash_ops_t job_stats_hash_ops = {
        .hs_exit       = job_stat_exit,
 };
 
-static int job_iter_callback(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int job_iter_callback(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                             struct hlist_node *hnode, void *data)
 {
        time_t oldest = *((time_t *)data);
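Pulling the renamed pieces together, a hypothetical ops table in the post-patch style, modelled on job_stats_hash_ops above and reusing struct my_obj from the earlier sketch; the hs_key/hs_keycmp prototypes follow the callbacks visible in the flock and jobstats hunks, the reference-counting bodies are left as comments, and this is the shape the earlier cfs_hash_create() sketch assumes for my_hash_ops:

static unsigned my_hash_hash(struct cfs_hash *hs, const void *key, unsigned mask)
{
        return cfs_hash_u64_hash(*(__u64 *)key, mask);
}

static void *my_hash_key(struct hlist_node *hnode)
{
        struct my_obj *obj = hlist_entry(hnode, struct my_obj, mo_hnode);

        return &obj->mo_key;
}

static int my_hash_keycmp(const void *key, struct hlist_node *hnode)
{
        struct my_obj *obj = hlist_entry(hnode, struct my_obj, mo_hnode);

        return obj->mo_key == *(__u64 *)key;
}

static void *my_hash_object(struct hlist_node *hnode)
{
        return hlist_entry(hnode, struct my_obj, mo_hnode);
}

static void my_hash_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
        /* take a reference on the containing object here */
}

static void my_hash_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
        /* drop the reference; bucket lock is held, must not sleep */
}

static struct cfs_hash_ops my_hash_ops = {
        .hs_hash        = my_hash_hash,
        .hs_key         = my_hash_key,
        .hs_keycmp      = my_hash_keycmp,
        .hs_object      = my_hash_object,
        .hs_get         = my_hash_get,
        .hs_put_locked  = my_hash_put_locked,
};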
diff --git a/lustre/obdclass/lprocfs_status_server.c b/lustre/obdclass/lprocfs_status_server.c
index 4917fe6..b3b1ee1 100644 (file)
@@ -137,7 +137,7 @@ static void lprocfs_free_client_stats(struct nid_stat *client_stat)
 
 void lprocfs_free_per_client_stats(struct obd_device *obd)
 {
-       cfs_hash_t *hash = obd->obd_nid_stats_hash;
+       struct cfs_hash *hash = obd->obd_nid_stats_hash;
        struct nid_stat *stat;
        ENTRY;
 
@@ -154,8 +154,9 @@ void lprocfs_free_per_client_stats(struct obd_device *obd)
 }
 EXPORT_SYMBOL(lprocfs_free_per_client_stats);
 
-static int lprocfs_exp_print_uuid_seq(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                     struct hlist_node *hnode, void *cb_data)
+static int
+lprocfs_exp_print_uuid_seq(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                          struct hlist_node *hnode, void *cb_data)
 {
        struct seq_file *m = cb_data;
        struct obd_export *exp = cfs_hash_object(hs, hnode);
@@ -165,8 +166,9 @@ static int lprocfs_exp_print_uuid_seq(cfs_hash_t *hs, cfs_hash_bd_t *bd,
        return 0;
 }
 
-static int lprocfs_exp_print_nodemap_seq(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                       struct hlist_node *hnode, void *cb_data)
+static int
+lprocfs_exp_print_nodemap_seq(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                             struct hlist_node *hnode, void *cb_data)
 {
        struct seq_file *m = cb_data;
        struct obd_export *exp = cfs_hash_object(hs, hnode);
@@ -177,7 +179,8 @@ static int lprocfs_exp_print_nodemap_seq(cfs_hash_t *hs, cfs_hash_bd_t *bd,
        return 0;
 }
 
-static int lprocfs_exp_nodemap_seq_show(struct seq_file *m, void *data)
+static int
+lprocfs_exp_nodemap_seq_show(struct seq_file *m, void *data)
 {
        struct nid_stat *stats = m->private;
        struct obd_device *obd = stats->nid_obd;
@@ -199,8 +202,9 @@ static int lprocfs_exp_uuid_seq_show(struct seq_file *m, void *data)
 }
 LPROC_SEQ_FOPS_RO(lprocfs_exp_uuid);
 
-static int lprocfs_exp_print_hash_seq(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                     struct hlist_node *hnode, void *cb_data)
+static int
+lprocfs_exp_print_hash_seq(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                          struct hlist_node *hnode, void *cb_data)
 
 {
        struct seq_file *m = cb_data;
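
All of the per-export printers above share the standard iteration-callback signature, so they can be driven by any of the cfs_hash walkers. A hypothetical minimal dump, assuming the usual cfs_hash_for_each(hash, callback, data) walker and an obd_device stashed in the seq_file's private field (neither detail is shown in this patch):

static int
demo_print_export_uuid(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                       struct hlist_node *hnode, void *cb_data)
{
        struct seq_file *m = cb_data;
        struct obd_export *exp = cfs_hash_object(hs, hnode);

        seq_printf(m, "%s\n", obd_uuid2str(&exp->exp_client_uuid));
        return 0;
}

static int demo_exports_seq_show(struct seq_file *m, void *data)
{
        struct obd_device *obd = m->private;

        cfs_hash_for_each(obd->obd_uuid_hash, demo_print_export_uuid, m);
        return 0;
}
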
index c111b05..a51dc41 100644 (file)
@@ -98,7 +98,7 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
         struct lu_object_header *top;
         struct lu_site          *site;
         struct lu_object        *orig;
-        cfs_hash_bd_t            bd;
+       struct cfs_hash_bd            bd;
        const struct lu_fid     *fid;
 
         top  = o->lo_header;
@@ -202,8 +202,8 @@ void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
        top = o->lo_header;
        set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
        if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
-               cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
-               cfs_hash_bd_t bd;
+               struct cfs_hash *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
+               struct cfs_hash_bd bd;
 
                cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
                if (!list_empty(&top->loh_lru)) {
@@ -346,8 +346,8 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
         struct lu_object_header *h;
         struct lu_object_header *temp;
         struct lu_site_bkt_data *bkt;
-        cfs_hash_bd_t            bd;
-        cfs_hash_bd_t            bd2;
+       struct cfs_hash_bd            bd;
+       struct cfs_hash_bd            bd2;
        struct list_head         dispose;
        int                      did_sth;
        unsigned int             start;
@@ -574,7 +574,7 @@ int lu_object_invariant(const struct lu_object *o)
 }
 
 static struct lu_object *htable_lookup(struct lu_site *s,
-                                      cfs_hash_bd_t *bd,
+                                      struct cfs_hash_bd *bd,
                                       const struct lu_fid *f,
                                       wait_queue_t *waiter,
                                       __u64 *version)
@@ -667,8 +667,8 @@ static struct lu_object *lu_object_new(const struct lu_env *env,
                                        const struct lu_object_conf *conf)
 {
         struct lu_object        *o;
-        cfs_hash_t              *hs;
-        cfs_hash_bd_t            bd;
+       struct cfs_hash              *hs;
+       struct cfs_hash_bd            bd;
 
         o = lu_object_alloc(env, dev, f, conf);
         if (unlikely(IS_ERR(o)))
@@ -696,8 +696,8 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
        struct lu_object      *o;
        struct lu_object      *shadow;
        struct lu_site        *s;
-       cfs_hash_t            *hs;
-       cfs_hash_bd_t          bd;
+       struct cfs_hash            *hs;
+       struct cfs_hash_bd          bd;
        __u64                  version = 0;
 
         /*
@@ -868,7 +868,7 @@ struct lu_site_print_arg {
 };
 
 static int
-lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                  struct hlist_node *hnode, void *data)
 {
        struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
@@ -957,7 +957,7 @@ static unsigned long lu_htable_order(struct lu_device *top)
         return bits;
 }
 
-static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
+static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
                                const void *key, unsigned mask)
 {
        struct lu_fid  *fid = (struct lu_fid *)key;
@@ -997,7 +997,7 @@ static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
        return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
 }
 
-static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct lu_object_header *h;
 
@@ -1005,12 +1005,12 @@ static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
        atomic_inc(&h->loh_ref);
 }
 
-static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         LBUG(); /* we should never called it */
 }
 
-static cfs_hash_ops_t lu_site_hash_ops = {
+static struct cfs_hash_ops lu_site_hash_ops = {
         .hs_hash        = lu_obj_hop_hash,
         .hs_key         = lu_obj_hop_key,
         .hs_keycmp      = lu_obj_hop_keycmp,
@@ -1042,7 +1042,7 @@ EXPORT_SYMBOL(lu_dev_del_linkage);
 int lu_site_init(struct lu_site *s, struct lu_device *top)
 {
        struct lu_site_bkt_data *bkt;
-       cfs_hash_bd_t bd;
+       struct cfs_hash_bd bd;
        char name[16];
        unsigned long bits;
        unsigned int i;
@@ -1914,10 +1914,10 @@ typedef struct lu_site_stats{
         unsigned        lss_busy;
 } lu_site_stats_t;
 
-static void lu_site_stats_get(cfs_hash_t *hs,
+static void lu_site_stats_get(struct cfs_hash *hs,
                               lu_site_stats_t *stats, int populated)
 {
-       cfs_hash_bd_t bd;
+       struct cfs_hash_bd bd;
        unsigned int  i;
 
         cfs_hash_for_each_bucket(hs, &bd, i) {
@@ -2275,8 +2275,8 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
        struct lu_fid           *old = &o->lo_header->loh_fid;
        struct lu_object        *shadow;
        wait_queue_t             waiter;
-       cfs_hash_t              *hs;
-       cfs_hash_bd_t            bd;
+       struct cfs_hash         *hs;
+       struct cfs_hash_bd       bd;
        __u64                    version = 0;
 
        LASSERT(fid_is_zero(old));
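
lu_object.c leans on bucket descriptors (struct cfs_hash_bd) so a single bucket can stay locked across a lookup/insert pair. A hedged sketch of that pattern, reusing the demo_item type from the jobstats sketch above and assuming the usual cfs_hash_bd_lookup_locked(), cfs_hash_bd_add_locked() and cfs_hash_bd_unlock() helpers:

static struct demo_item *demo_find_or_add(struct cfs_hash *hs,
                                          struct demo_item *cand)
{
        struct cfs_hash_bd       bd;
        struct hlist_node       *hnode;
        struct demo_item        *item = cand;

        /* exclusively lock the bucket owning this key (last argument == 1) */
        cfs_hash_bd_get_and_lock(hs, cand->di_name, &bd, 1);
        hnode = cfs_hash_bd_lookup_locked(hs, &bd, cand->di_name);
        if (hnode != NULL)
                item = cfs_hash_object(hs, hnode);      /* existing entry wins */
        else
                cfs_hash_bd_add_locked(hs, &bd, &cand->di_hnode);
        cfs_hash_bd_unlock(hs, &bd, 1);

        return item;
}
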
index b386331..28af8f8 100644 (file)
@@ -49,9 +49,9 @@
 
 #include "llog_internal.h"
 
-static cfs_hash_ops_t uuid_hash_ops;
-static cfs_hash_ops_t nid_hash_ops;
-static cfs_hash_ops_t nid_stat_hash_ops;
+static struct cfs_hash_ops uuid_hash_ops;
+static struct cfs_hash_ops nid_hash_ops;
+static struct cfs_hash_ops nid_stat_hash_ops;
 
 /*********** string parsing utils *********/
 
@@ -1917,7 +1917,7 @@ EXPORT_SYMBOL(class_manual_cleanup);
  */
 
 static unsigned
-uuid_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+uuid_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
         return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid,
                                   sizeof(((struct obd_uuid *)key)->uuid), mask);
@@ -1956,7 +1956,7 @@ uuid_export_object(struct hlist_node *hnode)
 }
 
 static void
-uuid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
+uuid_export_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         struct obd_export *exp;
 
@@ -1965,7 +1965,7 @@ uuid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
 }
 
 static void
-uuid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+uuid_export_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         struct obd_export *exp;
 
@@ -1973,7 +1973,7 @@ uuid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
         class_export_put(exp);
 }
 
-static cfs_hash_ops_t uuid_hash_ops = {
+static struct cfs_hash_ops uuid_hash_ops = {
         .hs_hash        = uuid_hash,
         .hs_key         = uuid_key,
         .hs_keycmp      = uuid_keycmp,
@@ -1988,7 +1988,7 @@ static cfs_hash_ops_t uuid_hash_ops = {
  */
 
 static unsigned
-nid_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+nid_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
         return cfs_hash_djb2_hash(key, sizeof(lnet_nid_t), mask);
 }
@@ -2026,7 +2026,7 @@ nid_export_object(struct hlist_node *hnode)
 }
 
 static void
-nid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
+nid_export_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         struct obd_export *exp;
 
@@ -2035,7 +2035,7 @@ nid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
 }
 
 static void
-nid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+nid_export_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         struct obd_export *exp;
 
@@ -2043,7 +2043,7 @@ nid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
         class_export_put(exp);
 }
 
-static cfs_hash_ops_t nid_hash_ops = {
+static struct cfs_hash_ops nid_hash_ops = {
         .hs_hash        = nid_hash,
         .hs_key         = nid_key,
         .hs_keycmp      = nid_kepcmp,
@@ -2080,7 +2080,7 @@ nidstats_object(struct hlist_node *hnode)
 }
 
 static void
-nidstats_get(cfs_hash_t *hs, struct hlist_node *hnode)
+nidstats_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         struct nid_stat *ns;
 
@@ -2089,7 +2089,7 @@ nidstats_get(cfs_hash_t *hs, struct hlist_node *hnode)
 }
 
 static void
-nidstats_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+nidstats_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         struct nid_stat *ns;
 
@@ -2097,7 +2097,7 @@ nidstats_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
         nidstat_putref(ns);
 }
 
-static cfs_hash_ops_t nid_stat_hash_ops = {
+static struct cfs_hash_ops nid_stat_hash_ops = {
         .hs_hash        = nid_hash,
         .hs_key         = nidstats_key,
         .hs_keycmp      = nidstats_keycmp,
index 1524ea7..1e14457 100644 (file)
@@ -138,7 +138,7 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
  * Hash operations for uid/gid <-> osc_quota_info
  */
 static unsigned
-oqi_hashfn(cfs_hash_t *hs, const void *key, unsigned mask)
+oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_u32_hash(*((__u32*)key), mask);
 }
@@ -171,17 +171,17 @@ oqi_object(struct hlist_node *hnode)
 }
 
 static void
-oqi_get(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
 }
 
 static void
-oqi_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
 }
 
 static void
-oqi_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct osc_quota_info *oqi;
 
@@ -194,7 +194,7 @@ oqi_exit(cfs_hash_t *hs, struct hlist_node *hnode)
 #define HASH_QUOTA_CUR_BITS 5
 #define HASH_QUOTA_MAX_BITS 15
 
-static cfs_hash_ops_t quota_hash_ops = {
+static struct cfs_hash_ops quota_hash_ops = {
        .hs_hash        = oqi_hashfn,
        .hs_keycmp      = oqi_keycmp,
        .hs_key         = oqi_key,
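
With quota_hash_ops converted to a struct cfs_hash_ops, creating and tearing down the per-type quota hash stays a plain cfs_hash_create()/cfs_hash_putref() pair. The argument order below (name, current/max/bucket bits, extra bytes, min/max theta, ops, flags) is assumed from the libcfs API rather than shown in this hunk, and the bucket-bit value is purely illustrative:

static struct cfs_hash *demo_quota_hash_init(void)
{
        return cfs_hash_create("demo_quota_hash",
                               HASH_QUOTA_CUR_BITS, HASH_QUOTA_MAX_BITS,
                               HASH_QUOTA_CUR_BITS,     /* bucket bits, illustrative */
                               0,                       /* no extra bytes per bucket */
                               CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
                               &quota_hash_ops, CFS_HASH_DEFAULT);
}

static void demo_quota_hash_fini(struct cfs_hash *hash)
{
        if (hash != NULL)
                cfs_hash_putref(hash);  /* drops the creation reference */
}
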
index d6c1717..f3a6211 100644 (file)
@@ -41,8 +41,8 @@
 
 #include "ptlrpc_internal.h"
 
-static cfs_hash_t *conn_hash = NULL;
-static cfs_hash_ops_t conn_hash_ops;
+static struct cfs_hash *conn_hash;
+static struct cfs_hash_ops conn_hash_ops;
 
 struct ptlrpc_connection *
 ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
@@ -164,7 +164,7 @@ void ptlrpc_connection_fini(void) {
  * Hash operations for net_peer<->connection
  */
 static unsigned
-conn_hashfn(cfs_hash_t *hs, const void *key, unsigned mask)
+conn_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
 }
@@ -198,7 +198,7 @@ conn_object(struct hlist_node *hnode)
 }
 
 static void
-conn_get(cfs_hash_t *hs, struct hlist_node *hnode)
+conn_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ptlrpc_connection *conn;
 
@@ -207,7 +207,7 @@ conn_get(cfs_hash_t *hs, struct hlist_node *hnode)
 }
 
 static void
-conn_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+conn_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ptlrpc_connection *conn;
 
@@ -216,7 +216,7 @@ conn_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
 }
 
 static void
-conn_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+conn_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ptlrpc_connection *conn;
 
@@ -232,7 +232,7 @@ conn_exit(cfs_hash_t *hs, struct hlist_node *hnode)
        OBD_FREE_PTR(conn);
 }
 
-static cfs_hash_ops_t conn_hash_ops = {
+static struct cfs_hash_ops conn_hash_ops = {
        .hs_hash        = conn_hashfn,
        .hs_keycmp      = conn_keycmp,
        .hs_key         = conn_key,
index 82a16b6..5a634b2 100644 (file)
@@ -66,7 +66,7 @@ rwlock_t nm_range_tree_lock;
  * Hash keyed on nodemap name containing all
  * nodemaps
  */
-static cfs_hash_t *nodemap_hash;
+static struct cfs_hash *nodemap_hash;
 
 /**
  * Nodemap destructor
@@ -114,7 +114,7 @@ void nodemap_putref(struct lu_nodemap *nodemap)
                nodemap_destroy(nodemap);
 }
 
-static __u32 nodemap_hashfn(cfs_hash_t *hash_body,
+static __u32 nodemap_hashfn(struct cfs_hash *hash_body,
                            const void *key, unsigned mask)
 {
        return cfs_hash_djb2_hash(key, strlen(key), mask);
@@ -144,7 +144,7 @@ static void *nodemap_hs_hashobject(struct hlist_node *hnode)
        return hlist_entry(hnode, struct lu_nodemap, nm_hash);
 }
 
-static void nodemap_hs_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void nodemap_hs_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct lu_nodemap *nodemap;
 
@@ -152,7 +152,7 @@ static void nodemap_hs_get(cfs_hash_t *hs, struct hlist_node *hnode)
        nodemap_getref(nodemap);
 }
 
-static void nodemap_hs_put_locked(cfs_hash_t *hs,
+static void nodemap_hs_put_locked(struct cfs_hash *hs,
                                  struct hlist_node *hnode)
 {
        struct lu_nodemap *nodemap;
@@ -161,7 +161,7 @@ static void nodemap_hs_put_locked(cfs_hash_t *hs,
        nodemap_putref(nodemap);
 }
 
-static cfs_hash_ops_t nodemap_hash_operations = {
+static struct cfs_hash_ops nodemap_hash_operations = {
        .hs_hash        = nodemap_hashfn,
        .hs_key         = nodemap_hs_key,
        .hs_keycmp      = nodemap_hs_keycmp,
@@ -180,7 +180,7 @@ static cfs_hash_ops_t nodemap_hash_operations = {
  * \param      hnode           hash node
  * \param      data            not used here
  */
-static int nodemap_cleanup_iter_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int nodemap_cleanup_iter_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                   struct hlist_node *hnode, void *data)
 {
        struct lu_nodemap *nodemap;
@@ -1019,7 +1019,7 @@ cleanup:
        return rc;
 }
 
-static int nm_member_revoke_all_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int nm_member_revoke_all_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                   struct hlist_node *hnode, void *data)
 {
        struct lu_nodemap *nodemap;
index 43c38f8..458f375 100644 (file)
@@ -159,7 +159,7 @@ static int nodemap_ranges_open(struct inode *inode, struct file *file)
  * \param      data            seq_file to print to
  * \retval     0               success
  */
-static int nodemap_exports_show_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int nodemap_exports_show_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                   struct hlist_node *hnode, void *data)
 {
        struct seq_file         *m = data;
index 9871f8d..1216fe7 100644 (file)
@@ -47,7 +47,7 @@ void nm_member_putref(struct obd_export *exp)
 {
 }
 
-static __u32 nm_member_hashfn(cfs_hash_t *hash_body,
+static __u32 nm_member_hashfn(struct cfs_hash *hash_body,
                           const void *key, unsigned mask)
 {
        return hash_long((unsigned long)key, hash_body->hs_bkt_bits) & mask;
@@ -79,7 +79,7 @@ static void *nm_member_hs_hashobject(struct hlist_node *hnode)
                           exp_target_data.ted_nodemap_member);
 }
 
-static void nm_member_hs_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void nm_member_hs_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct obd_export       *exp;
 
@@ -88,7 +88,7 @@ static void nm_member_hs_get(cfs_hash_t *hs, struct hlist_node *hnode)
        nm_member_getref(exp);
 }
 
-static void nm_member_hs_put_locked(cfs_hash_t *hs,
+static void nm_member_hs_put_locked(struct cfs_hash *hs,
                                 struct hlist_node *hnode)
 {
        struct obd_export       *exp;
@@ -116,7 +116,7 @@ void nm_member_del(struct lu_nodemap *nodemap, struct obd_export *exp)
        exp->exp_target_data.ted_nodemap = NULL;
 }
 
-static cfs_hash_ops_t nm_member_hash_operations = {
+static struct cfs_hash_ops nm_member_hash_operations = {
        .hs_hash        = nm_member_hashfn,
        .hs_key         = nm_member_hs_key,
        .hs_keycmp      = nm_member_hs_keycmp,
@@ -154,7 +154,7 @@ int nm_member_init_hash(struct lu_nodemap *nodemap)
 /**
  * Callback from deleting a hash member
  */
-static int nm_member_delete_hash_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int nm_member_delete_hash_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                 struct hlist_node *hnode, void *data)
 {
        struct obd_export       *exp;
@@ -243,7 +243,7 @@ static void nm_member_exp_revoke(struct obd_export *exp)
        ldlm_revoke_export_locks(exp);
 }
 
-static int nm_member_reclassify_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int nm_member_reclassify_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                   struct hlist_node *hnode, void *data)
 {
        struct obd_export       *exp;
@@ -304,8 +304,9 @@ void nm_member_reclassify_nodemap(struct lu_nodemap *nodemap)
        mutex_unlock(&reclassify_nodemap_lock);
 }
 
-static int nm_member_revoke_locks_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                    struct hlist_node *hnode, void *data)
+static int
+nm_member_revoke_locks_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                         struct hlist_node *hnode, void *data)
 {
        struct obd_export       *exp;
        exp = hlist_entry(hnode, struct obd_export,
index fa36c5a..8389f11 100644 (file)
@@ -103,7 +103,7 @@ static cfs_binheap_ops_t nrs_crrn_heap_ops = {
 #define NRS_NID_BKT_BITS       8
 #define NRS_NID_BITS           16
 
-static unsigned nrs_crrn_hop_hash(cfs_hash_t *hs, const void *key,
+static unsigned nrs_crrn_hop_hash(struct cfs_hash *hs, const void *key,
                                  unsigned mask)
 {
        return cfs_hash_djb2_hash(key, sizeof(lnet_nid_t), mask);
@@ -131,7 +131,7 @@ static void *nrs_crrn_hop_object(struct hlist_node *hnode)
        return hlist_entry(hnode, struct nrs_crrn_client, cc_hnode);
 }
 
-static void nrs_crrn_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void nrs_crrn_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct nrs_crrn_client *cli = hlist_entry(hnode,
                                                      struct nrs_crrn_client,
@@ -139,7 +139,7 @@ static void nrs_crrn_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
        atomic_inc(&cli->cc_ref);
 }
 
-static void nrs_crrn_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
+static void nrs_crrn_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct nrs_crrn_client  *cli = hlist_entry(hnode,
                                                       struct nrs_crrn_client,
@@ -147,7 +147,7 @@ static void nrs_crrn_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
        atomic_dec(&cli->cc_ref);
 }
 
-static void nrs_crrn_hop_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+static void nrs_crrn_hop_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct nrs_crrn_client  *cli = hlist_entry(hnode,
                                                       struct nrs_crrn_client,
@@ -159,7 +159,7 @@ static void nrs_crrn_hop_exit(cfs_hash_t *hs, struct hlist_node *hnode)
        OBD_FREE_PTR(cli);
 }
 
-static cfs_hash_ops_t nrs_crrn_hash_ops = {
+static struct cfs_hash_ops nrs_crrn_hash_ops = {
        .hs_hash        = nrs_crrn_hop_hash,
        .hs_keycmp      = nrs_crrn_hop_keycmp,
        .hs_key         = nrs_crrn_hop_key,
index 0973b69..3a949a4 100644 (file)
@@ -388,7 +388,8 @@ static void nrs_orr_genobjname(struct ptlrpc_nrs_policy *policy, char *name)
 #define NRS_TRR_BKT_BITS       2
 #define NRS_TRR_HASH_FLAGS     CFS_HASH_SPIN_BKTLOCK
 
-static unsigned nrs_orr_hop_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+static unsigned
+nrs_orr_hop_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_djb2_hash(key, sizeof(struct nrs_orr_key), mask);
 }
@@ -416,7 +417,7 @@ static void *nrs_orr_hop_object(struct hlist_node *hnode)
        return hlist_entry(hnode, struct nrs_orr_object, oo_hnode);
 }
 
-static void nrs_orr_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void nrs_orr_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct nrs_orr_object *orro = hlist_entry(hnode,
                                                      struct nrs_orr_object,
@@ -428,14 +429,14 @@ static void nrs_orr_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
  * Removes an nrs_orr_object the hash and frees its memory, if the object has
  * no active users.
  */
-static void nrs_orr_hop_put_free(cfs_hash_t *hs, struct hlist_node *hnode)
+static void nrs_orr_hop_put_free(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct nrs_orr_object *orro = hlist_entry(hnode,
                                                      struct nrs_orr_object,
                                                      oo_hnode);
        struct nrs_orr_data   *orrd = container_of(orro->oo_res.res_parent,
                                                   struct nrs_orr_data, od_res);
-       cfs_hash_bd_t          bd;
+       struct cfs_hash_bd     bd;
 
        cfs_hash_bd_get_and_lock(hs, &orro->oo_key, &bd, 1);
 
@@ -452,7 +453,7 @@ static void nrs_orr_hop_put_free(cfs_hash_t *hs, struct hlist_node *hnode)
        OBD_SLAB_FREE_PTR(orro, orrd->od_cache);
 }
 
-static void nrs_orr_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
+static void nrs_orr_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct nrs_orr_object *orro = hlist_entry(hnode,
                                                      struct nrs_orr_object,
@@ -469,7 +470,7 @@ static int nrs_trr_hop_keycmp(const void *key, struct hlist_node *hnode)
        return orro->oo_key.ok_idx == ((struct nrs_orr_key *)key)->ok_idx;
 }
 
-static void nrs_trr_hop_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+static void nrs_trr_hop_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct nrs_orr_object *orro = hlist_entry(hnode,
                                                      struct nrs_orr_object,
@@ -484,7 +485,7 @@ static void nrs_trr_hop_exit(cfs_hash_t *hs, struct hlist_node *hnode)
        OBD_SLAB_FREE_PTR(orro, orrd->od_cache);
 }
 
-static cfs_hash_ops_t nrs_orr_hash_ops = {
+static struct cfs_hash_ops nrs_orr_hash_ops = {
        .hs_hash        = nrs_orr_hop_hash,
        .hs_key         = nrs_orr_hop_key,
        .hs_keycmp      = nrs_orr_hop_keycmp,
@@ -494,7 +495,7 @@ static cfs_hash_ops_t nrs_orr_hash_ops = {
        .hs_put_locked  = nrs_orr_hop_put,
 };
 
-static cfs_hash_ops_t nrs_trr_hash_ops = {
+static struct cfs_hash_ops nrs_trr_hash_ops = {
        .hs_hash        = nrs_orr_hop_hash,
        .hs_key         = nrs_orr_hop_key,
        .hs_keycmp      = nrs_trr_hop_keycmp,
@@ -616,7 +617,7 @@ static int nrs_orr_init(struct ptlrpc_nrs_policy *policy)
 static int nrs_orr_start(struct ptlrpc_nrs_policy *policy, char *arg)
 {
        struct nrs_orr_data    *orrd;
-       cfs_hash_ops_t         *ops;
+       struct cfs_hash_ops            *ops;
        unsigned                cur_bits;
        unsigned                max_bits;
        unsigned                bkt_bits;
index d8c6a31..7fb0f4e 100644 (file)
@@ -440,7 +440,7 @@ static cfs_binheap_ops_t nrs_tbf_heap_ops = {
        .hop_compare    = tbf_cli_compare,
 };
 
-static unsigned nrs_tbf_jobid_hop_hash(cfs_hash_t *hs, const void *key,
+static unsigned nrs_tbf_jobid_hop_hash(struct cfs_hash *hs, const void *key,
                                  unsigned mask)
 {
        return cfs_hash_djb2_hash(key, strlen(key), mask);
@@ -469,7 +469,7 @@ static void *nrs_tbf_jobid_hop_object(struct hlist_node *hnode)
        return hlist_entry(hnode, struct nrs_tbf_client, tc_hnode);
 }
 
-static void nrs_tbf_jobid_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void nrs_tbf_jobid_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct nrs_tbf_client *cli = hlist_entry(hnode,
                                                     struct nrs_tbf_client,
@@ -478,7 +478,7 @@ static void nrs_tbf_jobid_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
        atomic_inc(&cli->tc_ref);
 }
 
-static void nrs_tbf_jobid_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
+static void nrs_tbf_jobid_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct nrs_tbf_client *cli = hlist_entry(hnode,
                                                     struct nrs_tbf_client,
@@ -487,18 +487,19 @@ static void nrs_tbf_jobid_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
        atomic_dec(&cli->tc_ref);
 }
 
-static void nrs_tbf_jobid_hop_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+static void
+nrs_tbf_jobid_hop_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 
 {
        struct nrs_tbf_client *cli = hlist_entry(hnode,
-                                                    struct nrs_tbf_client,
-                                                    tc_hnode);
+                                                struct nrs_tbf_client,
+                                                tc_hnode);
 
        LASSERT(atomic_read(&cli->tc_ref) == 0);
        nrs_tbf_cli_fini(cli);
 }
 
-static cfs_hash_ops_t nrs_tbf_jobid_hash_ops = {
+static struct cfs_hash_ops nrs_tbf_jobid_hash_ops = {
        .hs_hash        = nrs_tbf_jobid_hop_hash,
        .hs_keycmp      = nrs_tbf_jobid_hop_keycmp,
        .hs_key         = nrs_tbf_jobid_hop_key,
@@ -514,8 +515,8 @@ static cfs_hash_ops_t nrs_tbf_jobid_hash_ops = {
                                  CFS_HASH_DEPTH)
 
 static struct nrs_tbf_client *
-nrs_tbf_jobid_hash_lookup(cfs_hash_t *hs,
-                         cfs_hash_bd_t *bd,
+nrs_tbf_jobid_hash_lookup(struct cfs_hash *hs,
+                         struct cfs_hash_bd *bd,
                          const char *jobid)
 {
        struct hlist_node *hnode;
@@ -542,8 +543,8 @@ nrs_tbf_jobid_cli_find(struct nrs_tbf_head *head,
 {
        const char              *jobid;
        struct nrs_tbf_client   *cli;
-       cfs_hash_t              *hs = head->th_cli_hash;
-       cfs_hash_bd_t            bd;
+       struct cfs_hash         *hs = head->th_cli_hash;
+       struct cfs_hash_bd               bd;
 
        jobid = lustre_msg_get_jobid(req->rq_reqmsg);
        if (jobid == NULL)
@@ -561,8 +562,8 @@ nrs_tbf_jobid_cli_findadd(struct nrs_tbf_head *head,
 {
        const char              *jobid;
        struct nrs_tbf_client   *ret;
-       cfs_hash_t              *hs = head->th_cli_hash;
-       cfs_hash_bd_t            bd;
+       struct cfs_hash         *hs = head->th_cli_hash;
+       struct cfs_hash_bd               bd;
 
        jobid = cli->tc_jobid;
        cfs_hash_bd_get_and_lock(hs, (void *)jobid, &bd, 1);
@@ -580,8 +581,8 @@ static void
 nrs_tbf_jobid_cli_put(struct nrs_tbf_head *head,
                      struct nrs_tbf_client *cli)
 {
-       cfs_hash_bd_t            bd;
-       cfs_hash_t              *hs = head->th_cli_hash;
+       struct cfs_hash_bd               bd;
+       struct cfs_hash         *hs = head->th_cli_hash;
        struct nrs_tbf_bucket   *bkt;
        int                      hw;
        struct list_head        zombies;
@@ -653,7 +654,7 @@ nrs_tbf_jobid_startup(struct ptlrpc_nrs_policy *policy,
        int                      bits;
        int                      i;
        int                      rc;
-       cfs_hash_bd_t            bd;
+       struct cfs_hash_bd       bd;
 
        bits = nrs_tbf_jobid_hash_order();
        if (bits < NRS_TBF_JOBID_BKT_BITS)
@@ -864,7 +865,7 @@ static struct nrs_tbf_ops nrs_tbf_jobid_ops = {
 #define NRS_TBF_NID_BKT_BITS    8
 #define NRS_TBF_NID_BITS        16
 
-static unsigned nrs_tbf_nid_hop_hash(cfs_hash_t *hs, const void *key,
+static unsigned nrs_tbf_nid_hop_hash(struct cfs_hash *hs, const void *key,
                                  unsigned mask)
 {
        return cfs_hash_djb2_hash(key, sizeof(lnet_nid_t), mask);
@@ -894,7 +895,7 @@ static void *nrs_tbf_nid_hop_object(struct hlist_node *hnode)
        return hlist_entry(hnode, struct nrs_tbf_client, tc_hnode);
 }
 
-static void nrs_tbf_nid_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void nrs_tbf_nid_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct nrs_tbf_client *cli = hlist_entry(hnode,
                                                     struct nrs_tbf_client,
@@ -903,7 +904,7 @@ static void nrs_tbf_nid_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
        atomic_inc(&cli->tc_ref);
 }
 
-static void nrs_tbf_nid_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
+static void nrs_tbf_nid_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct nrs_tbf_client *cli = hlist_entry(hnode,
                                                     struct nrs_tbf_client,
@@ -912,7 +913,7 @@ static void nrs_tbf_nid_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
        atomic_dec(&cli->tc_ref);
 }
 
-static void nrs_tbf_nid_hop_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+static void nrs_tbf_nid_hop_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct nrs_tbf_client *cli = hlist_entry(hnode,
                                                     struct nrs_tbf_client,
@@ -925,7 +926,7 @@ static void nrs_tbf_nid_hop_exit(cfs_hash_t *hs, struct hlist_node *hnode)
        nrs_tbf_cli_fini(cli);
 }
 
-static cfs_hash_ops_t nrs_tbf_nid_hash_ops = {
+static struct cfs_hash_ops nrs_tbf_nid_hash_ops = {
        .hs_hash        = nrs_tbf_nid_hop_hash,
        .hs_keycmp      = nrs_tbf_nid_hop_keycmp,
        .hs_key         = nrs_tbf_nid_hop_key,
index 2e335d9..8007f16 100644 (file)
@@ -38,7 +38,8 @@ static int hash_lqs_cur_bits = HASH_LQE_CUR_BITS;
 CFS_MODULE_PARM(hash_lqs_cur_bits, "i", int, 0444,
                 "the current bits of lqe hash");
 
-static unsigned lqe64_hash_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+static unsigned
+lqe64_hash_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_u64_hash(*((__u64 *)key), mask);
 }
@@ -62,28 +63,28 @@ static void *lqe_hash_object(struct hlist_node *hnode)
        return hlist_entry(hnode, struct lquota_entry, lqe_hash);
 }
 
-static void lqe_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lqe_hash_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct lquota_entry *lqe;
        lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
        lqe_getref(lqe);
 }
 
-static void lqe_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lqe_hash_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct lquota_entry *lqe;
        lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
        lqe_putref(lqe);
 }
 
-static void lqe_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lqe_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        CERROR("Should not have any item left!\n");
 }
 
 /* lqe hash methods for 64-bit uid/gid, new hash functions would have to be
  * defined for per-directory quota relying on a 128-bit FID */
-static cfs_hash_ops_t lqe64_hash_ops = {
+static struct cfs_hash_ops lqe64_hash_ops = {
        .hs_hash       = lqe64_hash_hash,
        .hs_key        = lqe64_hash_key,
        .hs_keycmp     = lqe64_hash_keycmp,
@@ -114,7 +115,7 @@ struct lqe_iter_data {
        bool            lid_free_all;
 };
 
-static int lqe_iter_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int lqe_iter_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                       struct hlist_node *hnode, void *data)
 {
        struct lqe_iter_data *d = (struct lqe_iter_data *)data;
@@ -151,7 +152,7 @@ static int lqe_iter_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
  * \param free_all - free all entries or only free the entries
  *                   without quota enforce ?
  */
-static void lqe_cleanup(cfs_hash_t *hash, bool free_all)
+static void lqe_cleanup(struct cfs_hash *hash, bool free_all)
 {
        struct lqe_iter_data    d;
        int                     repeat = 0;
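
lqe_cleanup() above is the destructive counterpart of the print callbacks: it walks the hash while entries are being dropped. A sketch of how lqe_iter_cb() might be driven for a full flush, assuming cfs_hash_for_each_safe() takes the same (hash, callback, data) triple as the other walkers and that the remaining lqe_iter_data fields may start out zeroed:

static void demo_lqe_flush(struct cfs_hash *hash)
{
        struct lqe_iter_data d = {
                .lid_free_all = true,   /* release every entry, enforced or not */
        };

        cfs_hash_for_each_safe(hash, lqe_iter_cb, &d);
}
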
index 2bacd89..a60bf5b 100644 (file)
@@ -175,7 +175,7 @@ struct lquota_entry {
  * present.  */
 struct lquota_site {
        /* Hash table storing lquota_entry structures */
-       cfs_hash_t      *lqs_hash;
+       struct cfs_hash *lqs_hash;
 
        /* Quota type, either user or group. */
        int              lqs_qtype;
index d94d65d..e21c78f 100644 (file)
@@ -65,7 +65,7 @@ struct qmt_device {
         * Once we support quota on non-default pools, then more pools will
         * be added to this hash table and pool master setup would have to be
         * handled via configuration logs */
-       cfs_hash_t              *qmt_pool_hash;
+       struct cfs_hash         *qmt_pool_hash;
 
        /* List of pools managed by this master target */
        struct list_head         qmt_pool_list;
index 05da037..55c6dc2 100644 (file)
@@ -86,7 +86,8 @@ static inline void qpi_putref_locked(struct qmt_pool_info *pool)
  * Hash functions for qmt_pool_info management
  */
 
-static unsigned qpi_hash_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+static unsigned
+qpi_hash_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_u32_hash(*((__u32 *)key), mask);
 }
@@ -110,27 +111,27 @@ static void *qpi_hash_object(struct hlist_node *hnode)
        return hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
 }
 
-static void qpi_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void qpi_hash_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct qmt_pool_info *pool;
        pool = hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
        qpi_getref(pool);
 }
 
-static void qpi_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void qpi_hash_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct qmt_pool_info *pool;
        pool = hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
        qpi_putref_locked(pool);
 }
 
-static void qpi_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+static void qpi_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        CERROR("Should not have any item left!\n");
 }
 
 /* vector of hash operations */
-static cfs_hash_ops_t qpi_hash_ops = {
+static struct cfs_hash_ops qpi_hash_ops = {
        .hs_hash        = qpi_hash_hash,
        .hs_key         = qpi_hash_key,
        .hs_keycmp      = qpi_hash_keycmp,
index 03b7741..33e4988 100644 (file)
@@ -553,7 +553,7 @@ void qsd_stop_reint_thread(struct qsd_qtype_info *qqi)
        }
 }
 
-static int qsd_entry_iter_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int qsd_entry_iter_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                             struct hlist_node *hnode, void *data)
 {
        struct lquota_entry     *lqe;