From 9ab294f50ec9fbbf5ba5e5909aacfd7f0d740b3a Mon Sep 17 00:00:00 2001
From: Arshad Hussain
Date: Sun, 8 Sep 2024 10:56:51 -0400
Subject: [PATCH] LU-6142 libcfs: Fix style issues for libcfs_hash.h

This patch fixes issues reported by checkpatch for file
libcfs/include/libcfs/libcfs_hash.h

Test-Parameters: trivial
Signed-off-by: Arshad Hussain
Change-Id: I0826a6ea530b2e8547d52c4123455b6da3d02b46
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/56302
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: James Simmons
Reviewed-by: Petros Koutoupis
Reviewed-by: Timothy Day
Reviewed-by: Oleg Drokin
---
 libcfs/include/libcfs/libcfs_hash.h | 429 ++++++++++++++++++------------------
 1 file changed, 214 insertions(+), 215 deletions(-)

diff --git a/libcfs/include/libcfs/libcfs_hash.h b/libcfs/include/libcfs/libcfs_hash.h
index 5d5e998..cc5ec3b 100644
--- a/libcfs/include/libcfs/libcfs_hash.h
+++ b/libcfs/include/libcfs/libcfs_hash.h
@@ -42,12 +42,13 @@
 #include
 #include
-/** disable debug */
+/* disable debug */
 #define CFS_HASH_DEBUG_NONE 0
-/** record hash depth and output to console when it's too deep,
- * computing overhead is low but consume more memory */
+/* record hash depth and output to console when it's too deep,
+ * computing overhead is low but consume more memory
+ */
 #define CFS_HASH_DEBUG_1 1
-/** expensive, check key validation */
+/* expensive, check key validation */
 #define CFS_HASH_DEBUG_2 2

 #define CFS_HASH_DEBUG_LEVEL CFS_HASH_DEBUG_NONE
@@ -57,12 +58,12 @@
 struct cfs_hash_lock_ops;
 struct cfs_hash_hlist_ops;

 union cfs_hash_lock {
-	rwlock_t rw; /**< rwlock */
-	spinlock_t spin; /**< spinlock */
-	struct rw_semaphore rw_sem; /**< rwsem */
+	rwlock_t rw; /*< rwlock */
+	spinlock_t spin; /*< spinlock */
+	struct rw_semaphore rw_sem; /*< rwsem */
 };

-/**
+/*
  * cfs_hash_bucket is a container of:
  * - lock, counter ...
  * - array of hash-head starting from hsb_head[0], hash-head can be one of
  * . cfs_hash_head_t
  * . cfs_hash_head_dep_t
  * . cfs_hash_dhead_t
  * . cfs_hash_dhead_dep_t
  * - some extra bytes (caller can require it while creating hash)
  */
@@ -74,36 +75,36 @@ union cfs_hash_lock {
 struct cfs_hash_bucket {
-	union cfs_hash_lock hsb_lock; /**< bucket lock */
-	__u32 hsb_count; /**< current entries */
-	__u32 hsb_version; /**< change version */
-	unsigned int hsb_index; /**< index of bucket */
-	int hsb_depmax; /**< max depth on bucket */
-	long hsb_head[]; /**< hash-head array */
+	union cfs_hash_lock hsb_lock; /*< bucket lock */
+	__u32 hsb_count; /*< current entries */
+	__u32 hsb_version; /*< change version */
+	unsigned int hsb_index; /*< index of bucket */
+	int hsb_depmax; /*< max depth on bucket */
+	long hsb_head[]; /*< hash-head array */
 };

-/**
+/*
  * cfs_hash bucket descriptor, it's normally in stack of caller
  */
 struct cfs_hash_bd {
-	/**< address of bucket */
+	/*< address of bucket */
 	struct cfs_hash_bucket *bd_bucket;
-	/**< offset in bucket */
+	/*< offset in bucket */
 	unsigned int bd_offset;
 };

-#define CFS_HASH_NAME_LEN 16 /**< default name length */
-#define CFS_HASH_BIGNAME_LEN 64 /**< bigname for param tree */
+#define CFS_HASH_NAME_LEN 16 /*< default name length */
+#define CFS_HASH_BIGNAME_LEN 64 /*< bigname for param tree */

-#define CFS_HASH_BKT_BITS 3 /**< default bits of bucket */
-#define CFS_HASH_BITS_MAX 30 /**< max bits of bucket */
+#define CFS_HASH_BKT_BITS 3 /*< default bits of bucket */
+#define CFS_HASH_BITS_MAX 30 /*< max bits of bucket */
 #define CFS_HASH_BITS_MIN CFS_HASH_BKT_BITS

-/**
+/*
  * common hash attributes.
  */
 enum cfs_hash_tag {
-	/**
+	/*
	 * don't need any lock, caller will protect operations with it's
	 * own lock. With this flag:
	 * . CFS_HASH_NO_BKTLOCK, CFS_HASH_RW_BKTLOCK, CFS_HASH_SPIN_BKTLOCK
@@ -112,47 +113,47 @@ enum cfs_hash_tag {
	 * cfs_hash_for_each_empty, cfs_hash_rehash */
 	CFS_HASH_NO_LOCK = BIT(0),
-	/** no bucket lock, use one spinlock to protect the whole hash */
+	/* no bucket lock, use one spinlock to protect the whole hash */
 	CFS_HASH_NO_BKTLOCK = BIT(1),
-	/** rwlock to protect bucket */
+	/* rwlock to protect bucket */
 	CFS_HASH_RW_BKTLOCK = BIT(2),
-	/** spinlock to protect bucket */
+	/* spinlock to protect bucket */
 	CFS_HASH_SPIN_BKTLOCK = BIT(3),
-	/** always add new item to tail */
+	/* always add new item to tail */
 	CFS_HASH_ADD_TAIL = BIT(4),
-	/** hash-table doesn't have refcount on item */
+	/* hash-table doesn't have refcount on item */
 	CFS_HASH_NO_ITEMREF = BIT(5),
-	/** big name for param-tree */
+	/* big name for param-tree */
 	CFS_HASH_BIGNAME = BIT(6),
-	/** track global count */
+	/* track global count */
 	CFS_HASH_COUNTER = BIT(7),
-	/** rehash item by new key */
+	/* rehash item by new key */
 	CFS_HASH_REHASH_KEY = BIT(8),
-	/** Enable dynamic hash resizing */
+	/* Enable dynamic hash resizing */
 	CFS_HASH_REHASH = BIT(9),
-	/** can shrink hash-size */
+	/* can shrink hash-size */
 	CFS_HASH_SHRINK = BIT(10),
-	/** assert hash is empty on exit */
+	/* assert hash is empty on exit */
 	CFS_HASH_ASSERT_EMPTY = BIT(11),
-	/** record hlist depth */
+	/* record hlist depth */
 	CFS_HASH_DEPTH = BIT(12),
-	/**
+	/*
	 * rehash is always scheduled in a different thread, so current
	 * change on hash table is non-blocking
	 */
 	CFS_HASH_NBLK_CHANGE = BIT(13),
-	/** rw semaphore lock to protect bucket */
+	/* rw semaphore lock to protect bucket */
 	CFS_HASH_RW_SEM_BKTLOCK = BIT(14),
-	/** NB, we typed hs_flags as __u16, please change it
+	/* NB, we typed hs_flags as __u16, please change it
	 * if you need to extend >=16 flags
	 */
 };

-/** most used attributes */
-#define CFS_HASH_DEFAULT (CFS_HASH_RW_BKTLOCK | \
-			  CFS_HASH_COUNTER | CFS_HASH_REHASH)
+/* most used attributes */
+#define CFS_HASH_DEFAULT (CFS_HASH_RW_BKTLOCK | \
+			  CFS_HASH_COUNTER | CFS_HASH_REHASH)

-/**
+/*
  * cfs_hash is a hash-table implementation for general purpose, it can support:
  * . two refcount modes
  *   hash-table with & without refcount
@@ -192,161 +193,163 @@
  */
 struct cfs_hash {
-	/** serialize with rehash, or serialize all operations if
-	 * the hash-table has CFS_HASH_NO_BKTLOCK */
+	/* serialize with rehash, or serialize all operations if
+	 * the hash-table has CFS_HASH_NO_BKTLOCK
+	 */
 	union cfs_hash_lock hs_lock;
-	/** hash operations */
+	/* hash operations */
 	struct cfs_hash_ops *hs_ops;
-	/** hash lock operations */
+	/* hash lock operations */
 	struct cfs_hash_lock_ops *hs_lops;
-	/** hash list operations */
+	/* hash list operations */
 	struct cfs_hash_hlist_ops *hs_hops;
-	/** hash buckets-table */
+	/* hash buckets-table */
 	struct cfs_hash_bucket **hs_buckets;
-	/** total number of items on this hash-table */
+	/* total number of items on this hash-table */
 	atomic_t hs_count;
-	/** hash flags, see cfs_hash_tag for detail */
+	/* hash flags, see cfs_hash_tag for detail */
 	__u16 hs_flags;
-	/** # of extra-bytes for bucket, for user saving extended attributes */
-	__u16 hs_extra_bytes;
-	/** wants to iterate */
-	__u8 hs_iterating;
-	/** hash-table is dying */
-	__u8 hs_exiting;
-	/** current hash bits */
-	__u8 hs_cur_bits;
-	/** min hash bits */
-	__u8 hs_min_bits;
-	/** max hash bits */
-	__u8 hs_max_bits;
-	/** bits for rehash */
-	__u8 hs_rehash_bits;
-	/** bits for each bucket */
-	__u8 hs_bkt_bits;
-	/** resize min threshold */
-	__u16 hs_min_theta;
-	/** resize max threshold */
-	__u16 hs_max_theta;
-	/** resize count */
-	__u32 hs_rehash_count;
-	/** # of iterators (caller of cfs_hash_for_each_*) */
-	__u32 hs_iterators;
-	/** rehash workitem */
+	/* # of extra-bytes for bucket, for user saving extended attributes */
+	__u16 hs_extra_bytes;
+	/* wants to iterate */
+	__u8 hs_iterating;
+	/* hash-table is dying */
+	__u8 hs_exiting;
+	/* current hash bits */
+	__u8 hs_cur_bits;
+	/* min hash bits */
+	__u8 hs_min_bits;
+	/* max hash bits */
+	__u8 hs_max_bits;
+	/* bits for rehash */
+	__u8 hs_rehash_bits;
+	/* bits for each bucket */
+	__u8 hs_bkt_bits;
+	/* resize min threshold */
+	__u16 hs_min_theta;
+	/* resize max threshold */
+	__u16 hs_max_theta;
+	/* resize count */
+	__u32 hs_rehash_count;
+	/* # of iterators (caller of cfs_hash_for_each_*) */
+	__u32 hs_iterators;
+	/* rehash workitem */
 	struct work_struct hs_rehash_work;
-	/** refcount on this hash table */
+	/* refcount on this hash table */
 	struct kref hs_refcount;
-	/** rehash buckets-table */
+	/* rehash buckets-table */
 	struct cfs_hash_bucket **hs_rehash_buckets;
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-	/** serialize debug members */
+	/* serialize debug members */
 	spinlock_t hs_dep_lock;
-	/** max depth */
-	unsigned int hs_dep_max;
-	/** id of the deepest bucket */
-	unsigned int hs_dep_bkt;
-	/** offset in the deepest bucket */
-	unsigned int hs_dep_off;
-	/** bits when we found the max depth */
-	unsigned int hs_dep_bits;
-	/** workitem to output max depth */
+	/* max depth */
+	unsigned int hs_dep_max;
+	/* id of the deepest bucket */
+	unsigned int hs_dep_bkt;
+	/* offset in the deepest bucket */
+	unsigned int hs_dep_off;
+	/* bits when we found the max depth */
+	unsigned int hs_dep_bits;
+	/* workitem to output max depth */
 	struct work_struct hs_dep_work;
 #endif
-	/** name of htable */
+	/* name of htable */
 	char hs_name[];
 };

 struct cfs_hash_lock_ops {
-	/** lock the hash table */
+	/* lock the hash table */
 	void (*hs_lock)(union cfs_hash_lock *lock, int exclusive);
-	/** unlock the hash table */
+	/* unlock the hash table */
 	void (*hs_unlock)(union cfs_hash_lock *lock, int exclusive);
-	/** lock the hash bucket */
+	/* lock the hash bucket */
 	void (*hs_bkt_lock)(union cfs_hash_lock *lock, int exclusive);
-	/** unlock the hash bucket */
+	/* unlock the hash bucket */
 	void (*hs_bkt_unlock)(union cfs_hash_lock *lock, int exclusive);
 };

 struct cfs_hash_hlist_ops {
-	/** return hlist_head of hash-head of @bd */
-	struct hlist_head *(*hop_hhead)(struct cfs_hash *hs, struct cfs_hash_bd *bd);
-	/** return hash-head size */
+	/* return hlist_head of hash-head of @bd */
+	struct hlist_head *(*hop_hhead)(struct cfs_hash *hs,
+					struct cfs_hash_bd *bd);
+	/* return hash-head size */
 	int (*hop_hhead_size)(struct cfs_hash *hs);
-	/** add @hnode to hash-head of @bd */
+	/* add @hnode to hash-head of @bd */
 	int (*hop_hnode_add)(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			     struct hlist_node *hnode);
-	/** remove @hnode from hash-head of @bd */
+	/* remove @hnode from hash-head of @bd */
 	int (*hop_hnode_del)(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			     struct hlist_node *hnode);
 };

 struct cfs_hash_ops {
-	/** return hashed value from @key */
+	/* return hashed value from @key */
 	unsigned int (*hs_hash)(struct cfs_hash *hs, const void *key,
				const unsigned int bits);
-	/** return key address of @hnode */
+	/* return key address of @hnode */
 	void * (*hs_key)(struct hlist_node *hnode);
-	/** copy key from @hnode to @key */
+	/* copy key from @hnode to @key */
 	void (*hs_keycpy)(struct hlist_node *hnode, void *key);
-	/**
+	/*
	 * compare @key with key of @hnode
	 * returns 1 on a match
	 */
 	int (*hs_keycmp)(const void *key, struct hlist_node *hnode);
-	/** return object address of @hnode, i.e: container_of(...hnode) */
+	/* return object address of @hnode, i.e: container_of(...hnode) */
 	void * (*hs_object)(struct hlist_node *hnode);
-	/** get refcount of item, always called with holding bucket-lock */
+	/* get refcount of item, always called with holding bucket-lock */
 	void (*hs_get)(struct cfs_hash *hs, struct hlist_node *hnode);
-	/** release refcount of item */
+	/* release refcount of item */
 	void (*hs_put)(struct cfs_hash *hs, struct hlist_node *hnode);
-	/** release refcount of item, always called with holding bucket-lock */
-	void (*hs_put_locked)(struct cfs_hash *hs, struct hlist_node *hnode);
-	/** it's called before removing of @hnode */
+	/* release refcount of item, always called with holding bucket-lock */
+	void (*hs_put_locked)(struct cfs_hash *hs, struct hlist_node *node);
+	/* it's called before removing of @hnode */
 	void (*hs_exit)(struct cfs_hash *hs, struct hlist_node *hnode);
 };

-/** total number of buckets in @hs */
+/* total number of buckets in @hs */
 #define CFS_HASH_NBKT(hs) \
-	(1U << ((hs)->hs_cur_bits - (hs)->hs_bkt_bits))
+	(1U << ((hs)->hs_cur_bits - (hs)->hs_bkt_bits))

-/** total number of buckets in @hs while rehashing */
+/* total number of buckets in @hs while rehashing */
 #define CFS_HASH_RH_NBKT(hs) \
-	(1U << ((hs)->hs_rehash_bits - (hs)->hs_bkt_bits))
+	(1U << ((hs)->hs_rehash_bits - (hs)->hs_bkt_bits))

-/** number of hlist for in bucket */
+/* number of hlist for in bucket */
 #define CFS_HASH_BKT_NHLIST(hs) (1U << (hs)->hs_bkt_bits)

-/** total number of hlist in @hs */
+/* total number of hlist in @hs */
 #define CFS_HASH_NHLIST(hs) (1U << (hs)->hs_cur_bits)

-/** total number of hlist in @hs while rehashing */
+/* total number of hlist in @hs while rehashing */
 #define CFS_HASH_RH_NHLIST(hs) (1U << (hs)->hs_rehash_bits)

 static inline int
 cfs_hash_with_no_lock(struct cfs_hash *hs)
 {
-	/* caller will serialize all operations for this hash-table */
-	return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
+	/* caller will serialize all operations for this hash-table */
+	return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
 }

 static inline int
 cfs_hash_with_no_bktlock(struct cfs_hash *hs)
 {
-	/* no bucket lock, one single lock to protect the hash-table */
-	return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0;
+	/* no bucket lock, one single lock to protect the hash-table */
+	return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0;
 }

 static inline int
 cfs_hash_with_rw_bktlock(struct cfs_hash *hs)
 {
-	/* rwlock to protect hash bucket */
-	return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0;
+	/* rwlock to protect hash bucket */
+	return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0;
 }

 static inline int
 cfs_hash_with_spin_bktlock(struct cfs_hash *hs)
 {
-	/* spinlock to protect hash bucket */
-	return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0;
+	/* spinlock to protect hash bucket */
+	return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0;
 }

 static inline int
@@ -359,90 +362,93 @@ cfs_hash_with_rw_sem_bktlock(struct cfs_hash *hs)
 static inline int
 cfs_hash_with_add_tail(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
+	return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
 }

 static inline int
 cfs_hash_with_no_itemref(struct cfs_hash *hs)
 {
-	/* hash-table doesn't keep refcount on item,
-	 * item can't be removed from hash unless it's
-	 * ZERO refcount */
-	return (hs->hs_flags & CFS_HASH_NO_ITEMREF) != 0;
+	/* hash-table doesn't keep refcount on item, item can't be removed from
+	 * hash unless it's ZERO refcount
+	 */
+	return (hs->hs_flags & CFS_HASH_NO_ITEMREF) != 0;
 }

 static inline int
 cfs_hash_with_bigname(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_BIGNAME) != 0;
+	return (hs->hs_flags & CFS_HASH_BIGNAME) != 0;
 }

 static inline int
 cfs_hash_with_counter(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_COUNTER) != 0;
+	return (hs->hs_flags & CFS_HASH_COUNTER) != 0;
 }

 static inline int
 cfs_hash_with_rehash(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_REHASH) != 0;
+	return (hs->hs_flags & CFS_HASH_REHASH) != 0;
 }

 static inline int
 cfs_hash_with_rehash_key(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0;
+	return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0;
 }

 static inline int
 cfs_hash_with_shrink(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_SHRINK) != 0;
+	return (hs->hs_flags & CFS_HASH_SHRINK) != 0;
 }

 static inline int
 cfs_hash_with_assert_empty(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0;
+	return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0;
 }

 static inline int
 cfs_hash_with_depth(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_DEPTH) != 0;
+	return (hs->hs_flags & CFS_HASH_DEPTH) != 0;
 }

 static inline int
 cfs_hash_with_nblk_change(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0;
+	return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0;
 }

 static inline int
 cfs_hash_is_exiting(struct cfs_hash *hs)
-{ /* cfs_hash_destroy is called */
-	return hs->hs_exiting;
+{
+	/* cfs_hash_destroy is called */
+	return hs->hs_exiting;
 }

 static inline int
 cfs_hash_is_rehashing(struct cfs_hash *hs)
-{ /* rehash is launched */
-	return hs->hs_rehash_bits != 0;
+{
+	/* rehash is launched */
+	return hs->hs_rehash_bits != 0;
 }

 static inline int
 cfs_hash_is_iterating(struct cfs_hash *hs)
-{ /* someone is calling cfs_hash_for_each_* */
-	return hs->hs_iterating || hs->hs_iterators != 0;
+{
+	/* someone is calling cfs_hash_for_each_* */
+	return hs->hs_iterating || hs->hs_iterators != 0;
 }

 static inline int
 cfs_hash_bkt_size(struct cfs_hash *hs)
 {
 	return offsetof(struct cfs_hash_bucket, hsb_head[0]) +
-	       hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) +
-	       hs->hs_extra_bytes;
+	       hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) +
+	       hs->hs_extra_bytes;
 }

 static inline unsigned int
@@ -464,7 +470,7 @@ cfs_hash_keycpy(struct cfs_hash *hs, struct hlist_node *hnode, void *key)
 	hs->hs_ops->hs_keycpy(hnode, key);
 }

-/**
+/*
  * Returns 1 on a match,
  */
 static inline int
@@ -506,12 +512,12 @@ cfs_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 static inline void
 cfs_hash_lock(struct cfs_hash *hs, int excl)
 {
-	hs->hs_lops->hs_lock(&hs->hs_lock, excl);
+	hs->hs_lops->hs_lock(&hs->hs_lock, excl);
 }

 static inline void
 cfs_hash_unlock(struct cfs_hash *hs, int excl)
 {
-	hs->hs_lops->hs_unlock(&hs->hs_lock, excl);
+	hs->hs_lops->hs_unlock(&hs->hs_lock, excl);
 }

 static inline int cfs_hash_dec_and_lock(struct cfs_hash *hs,
@@ -524,16 +530,16 @@ static inline int cfs_hash_dec_and_lock(struct cfs_hash *hs,
 static inline void
 cfs_hash_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bd, int excl)
 {
-	hs->hs_lops->hs_bkt_lock(&bd->bd_bucket->hsb_lock, excl);
+	hs->hs_lops->hs_bkt_lock(&bd->bd_bucket->hsb_lock, excl);
 }

 static inline void
 cfs_hash_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bd, int excl)
 {
-	hs->hs_lops->hs_bkt_unlock(&bd->bd_bucket->hsb_lock, excl);
+	hs->hs_lops->hs_bkt_unlock(&bd->bd_bucket->hsb_lock, excl);
 }

-/**
+/*
  * operations on cfs_hash bucket (bd: bucket descriptor),
  * they are normally for hash-table without rehash
  */
@@ -544,61 +550,61 @@ static inline void
 cfs_hash_bd_get_and_lock(struct cfs_hash *hs, const void *key,
			  struct cfs_hash_bd *bd, int excl)
 {
-	cfs_hash_bd_get(hs, key, bd);
-	cfs_hash_bd_lock(hs, bd, excl);
+	cfs_hash_bd_get(hs, key, bd);
+	cfs_hash_bd_lock(hs, bd, excl);
 }

 static inline unsigned
 cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-	return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits);
+	return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits);
 }

 static inline void
-cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned index,
+cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned int index,
		      struct cfs_hash_bd *bd)
 {
-	bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
-	bd->bd_offset = index & (CFS_HASH_BKT_NHLIST(hs) - 1U);
+	bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
+	bd->bd_offset = index & (CFS_HASH_BKT_NHLIST(hs) - 1U);
 }

 static inline void *
 cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-	return (void *)bd->bd_bucket +
-	       cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
+	return (void *)bd->bd_bucket +
+	       cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
 }

 static inline __u32
 cfs_hash_bd_version_get(struct cfs_hash_bd *bd)
 {
-	/* need hold cfs_hash_bd_lock */
-	return bd->bd_bucket->hsb_version;
+	/* need hold cfs_hash_bd_lock */
+	return bd->bd_bucket->hsb_version;
 }

 static inline __u32
 cfs_hash_bd_count_get(struct cfs_hash_bd *bd)
 {
-	/* need hold cfs_hash_bd_lock */
-	return bd->bd_bucket->hsb_count;
+	/* need hold cfs_hash_bd_lock */
+	return bd->bd_bucket->hsb_count;
 }

 static inline int
 cfs_hash_bd_depmax_get(struct cfs_hash_bd *bd)
 {
-	return bd->bd_bucket->hsb_depmax;
+	return bd->bd_bucket->hsb_depmax;
 }

 static inline int
 cfs_hash_bd_compare(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
 {
-	if (bd1->bd_bucket->hsb_index != bd2->bd_bucket->hsb_index)
-		return bd1->bd_bucket->hsb_index - bd2->bd_bucket->hsb_index;
+	if (bd1->bd_bucket->hsb_index != bd2->bd_bucket->hsb_index)
+		return bd1->bd_bucket->hsb_index - bd2->bd_bucket->hsb_index;

-	if (bd1->bd_offset != bd2->bd_offset)
-		return bd1->bd_offset - bd2->bd_offset;
+	if (bd1->bd_offset != bd2->bd_offset)
+		return bd1->bd_offset - bd2->bd_offset;

-	return 0;
+	return 0;
 }

 void cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
@@ -637,7 +643,7 @@ struct hlist_node *
 cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			    const void *key, struct hlist_node *hnode);

-/**
+/*
  * operations on cfs_hash bucket (bd: bucket descriptor),
  * they are safe for hash-table with rehash
  */
@@ -669,10 +675,10 @@ cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,

 /* Hash init/cleanup functions */
 struct cfs_hash *
-cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
-		unsigned bkt_bits, unsigned extra_bytes,
-		unsigned min_theta, unsigned max_theta,
-		struct cfs_hash_ops *ops, unsigned flags);
+cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
+		unsigned int bkt_bits, unsigned int extra_bytes,
+		unsigned int min_theta, unsigned int max_theta,
+		struct cfs_hash_ops *ops, unsigned int flags);

 struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs);
 void cfs_hash_putref(struct cfs_hash *hs);
@@ -697,28 +703,23 @@ typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs,
				      struct cfs_hash_bd *bd,
				      struct hlist_node *node,
				      void *data);
-void *
-cfs_hash_lookup(struct cfs_hash *hs, const void *key);
-void
-cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
-void
-cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
-int
-cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
-			 void *data, int start);
-int
-cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
-			void *data);
-void
-cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
-		      cfs_hash_for_each_cb_t, void *data);
+void *cfs_hash_lookup(struct cfs_hash *hs, const void *key);
+void cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
+		       void *data);
+void cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
+			    void *data);
+int cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
+			     void *data, int start);
+int cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
+			    void *data);
+void cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
+			   cfs_hash_for_each_cb_t func, void *data);

 typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
-void
-cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data);
+void cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func,
+		       void *data);

-void
-cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
-			cfs_hash_for_each_cb_t, void *data);
+void cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex,
+			     cfs_hash_for_each_cb_t func, void *data);
 int cfs_hash_is_empty(struct cfs_hash *hs);
 __u64 cfs_hash_size_get(struct cfs_hash *hs);
@@ -772,14 +773,14 @@ cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 /* Return integer component of theta */
 static inline int __cfs_hash_theta_int(int theta)
 {
-	return (theta >> CFS_HASH_THETA_BITS);
+	return (theta >> CFS_HASH_THETA_BITS);
 }

 /* Return a fractional value between 0 and 999 */
 static inline int __cfs_hash_theta_frac(int theta)
 {
-	return ((theta * 1000) >> CFS_HASH_THETA_BITS) -
-	       (__cfs_hash_theta_int(theta) * 1000);
+	return ((theta * 1000) >> CFS_HASH_THETA_BITS) -
+	       (__cfs_hash_theta_int(theta) * 1000);
 }

 static inline int __cfs_hash_theta(struct cfs_hash *hs)
@@ -791,9 +792,9 @@ static inline int __cfs_hash_theta(struct cfs_hash *hs)
 static inline void
 __cfs_hash_set_theta(struct cfs_hash *hs, int min, int max)
 {
-	LASSERT(min < max);
-	hs->hs_min_theta = (__u16)min;
-	hs->hs_max_theta = (__u16)max;
+	LASSERT(min < max);
+	hs->hs_min_theta = (__u16)min;
+	hs->hs_max_theta = (__u16)max;
 }

 /* Generic debug formatting routines mainly for proc handler */
@@ -801,9 +802,7 @@ struct seq_file;
 void cfs_hash_debug_header(struct seq_file *m);
 void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);

-/*
- * Generic djb2 hash algorithm for character arrays.
- */
+/* Generic djb2 hash algorithm for character arrays. */
 static inline unsigned
 cfs_hash_djb2_hash(const void *key, size_t size, const unsigned int bits)
 {
@@ -817,22 +816,22 @@ cfs_hash_djb2_hash(const void *key, size_t size, const unsigned int bits)
 	return (hash & ((1U << bits) - 1));
 }

-/** iterate over all buckets in @bds (array of struct cfs_hash_bd) */
-#define cfs_hash_for_each_bd(bds, n, i) \
-	for (i = 0; i < n && (bds)[i].bd_bucket != NULL; i++)
+/* iterate over all buckets in @bds (array of struct cfs_hash_bd) */
+#define cfs_hash_for_each_bd(bds, n, i) \
+	for (i = 0; i < n && (bds)[i].bd_bucket != NULL; i++)

-/** iterate over all buckets of @hs */
-#define cfs_hash_for_each_bucket(hs, bd, pos) \
-	for (pos = 0; \
-	     pos < CFS_HASH_NBKT(hs) && \
-	     ((bd)->bd_bucket = (hs)->hs_buckets[pos]) != NULL; pos++)
+/* iterate over all buckets of @hs */
+#define cfs_hash_for_each_bucket(hs, bd, pos) \
+	for (pos = 0; \
+	     pos < CFS_HASH_NBKT(hs) && \
+	     ((bd)->bd_bucket = (hs)->hs_buckets[pos]) != NULL; pos++)

-/** iterate over all hlist of bucket @bd */
-#define cfs_hash_bd_for_each_hlist(hs, bd, hlist) \
-	for ((bd)->bd_offset = 0; \
-	     (bd)->bd_offset < CFS_HASH_BKT_NHLIST(hs) && \
-	     (hlist = cfs_hash_bd_hhead(hs, bd)) != NULL; \
-	     (bd)->bd_offset++)
+/* iterate over all hlist of bucket @bd */
+#define cfs_hash_bd_for_each_hlist(hs, bd, hlist) \
+	for ((bd)->bd_offset = 0; \
+	     (bd)->bd_offset < CFS_HASH_BKT_NHLIST(hs) && \
+	     (hlist = cfs_hash_bd_hhead(hs, bd)) != NULL; \
+	     (bd)->bd_offset++)

 /* !__LIBCFS__HASH_H__ */
 #endif
-- 
1.8.3.1
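Editor's note (not part of the patch): for readers unfamiliar with the checkpatch complaints being addressed, the minimal C sketch below illustrates the comment layout the file is converted to throughout -- doxygen-style "/**" openers become plain "/*", and multi-line comments put the closing marker on its own line. The MY_DEMO_* names are invented for illustration only and do not appear in libcfs_hash.h.

    /* illustrative sketch only; MY_DEMO_* are made-up identifiers */

    /* a single-line comment keeps the plain opener and closes on the same line */
    #define MY_DEMO_FLAG_SINGLE 0x01

    /* a multi-line comment wraps its text with aligned asterisks and places
     * the closing marker on a line of its own, as checkpatch prefers
     */
    #define MY_DEMO_FLAG_MULTI 0x02

The same pattern explains most of the churn above: the hunks change only comment markers, indentation, and "unsigned" to "unsigned int", leaving the declarations themselves functionally untouched.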