diff --git a/libcfs/include/libcfs/libcfs_hash.h b/libcfs/include/libcfs/libcfs_hash.h
index bd3d7d0..6fe31a6 100644
--- a/libcfs/include/libcfs/libcfs_hash.h
+++ b/libcfs/include/libcfs/libcfs_hash.h
@@ -27,7 +27,6 @@
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_hash.h
 *
@@ -38,24 +37,10 @@
 #ifndef __LIBCFS_HASH_H__
 #define __LIBCFS_HASH_H__
 
-#include 
 #include 
 #include 
-
-/*
- * Knuth recommends primes in approximately golden ratio to the maximum
- * integer representable by a machine word for multiplicative hashing.
- * Chuck Lever verified the effectiveness of this technique:
- * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
- *
- * These primes are chosen to be bit-sparse, that is operations on
- * them can use shifts and additions instead of multiplications for
- * machines where multiplications are slow.
- */
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define CFS_GOLDEN_RATIO_PRIME_32 0x9e370001UL
-/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define CFS_GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
+#include 
+#include 
 
 /** disable debug */
 #define CFS_HASH_DEBUG_NONE	0
@@ -74,6 +59,7 @@ struct cfs_hash_hlist_ops;
 union cfs_hash_lock {
 	rwlock_t		rw;		/**< rwlock */
 	spinlock_t		spin;		/**< spinlock */
+	struct rw_semaphore	rw_sem;		/**< rwsem */
 };
 
 /**
@@ -117,46 +103,49 @@ struct cfs_hash_bd {
  * common hash attributes.
  */
 enum cfs_hash_tag {
-	/**
-	 * don't need any lock, caller will protect operations with it's
-	 * own lock. With this flag:
-	 * . CFS_HASH_NO_BKTLOCK, CFS_HASH_RW_BKTLOCK, CFS_HASH_SPIN_BKTLOCK
-	 *   will be ignored.
-	 * . Some functions will be disabled with this flag, i.e:
-	 *   cfs_hash_for_each_empty, cfs_hash_rehash
-	 */
-	CFS_HASH_NO_LOCK	= 1 << 0,
-	/** no bucket lock, use one spinlock to protect the whole hash */
-	CFS_HASH_NO_BKTLOCK	= 1 << 1,
-	/** rwlock to protect bucket */
-	CFS_HASH_RW_BKTLOCK	= 1 << 2,
+	/**
+	 * don't need any lock, caller will protect operations with its
+	 * own lock. With this flag:
+	 * . CFS_HASH_NO_BKTLOCK, CFS_HASH_RW_BKTLOCK, CFS_HASH_SPIN_BKTLOCK
+	 *   will be ignored.
+	 * . Some functions will be disabled with this flag, i.e:
+	 *   cfs_hash_for_each_empty, cfs_hash_rehash
+	 */
+	CFS_HASH_NO_LOCK	= BIT(0),
+	/** no bucket lock, use one spinlock to protect the whole hash */
+	CFS_HASH_NO_BKTLOCK	= BIT(1),
+	/** rwlock to protect bucket */
+	CFS_HASH_RW_BKTLOCK	= BIT(2),
 	/** spinlock to protect bucket */
-	CFS_HASH_SPIN_BKTLOCK	= 1 << 3,
-	/** always add new item to tail */
-	CFS_HASH_ADD_TAIL	= 1 << 4,
-	/** hash-table doesn't have refcount on item */
-	CFS_HASH_NO_ITEMREF	= 1 << 5,
-	/** big name for param-tree */
-	CFS_HASH_BIGNAME	= 1 << 6,
-	/** track global count */
-	CFS_HASH_COUNTER	= 1 << 7,
-	/** rehash item by new key */
-	CFS_HASH_REHASH_KEY	= 1 << 8,
-	/** Enable dynamic hash resizing */
-	CFS_HASH_REHASH		= 1 << 9,
-	/** can shrink hash-size */
-	CFS_HASH_SHRINK		= 1 << 10,
-	/** assert hash is empty on exit */
-	CFS_HASH_ASSERT_EMPTY	= 1 << 11,
-	/** record hlist depth */
-	CFS_HASH_DEPTH		= 1 << 12,
-	/**
-	 * rehash is always scheduled in a different thread, so current
-	 * change on hash table is non-blocking
-	 */
-	CFS_HASH_NBLK_CHANGE	= 1 << 13,
-	/** NB, we typed hs_flags as __u16, please change it
-	 * if you need to extend >=16 flags */
+	CFS_HASH_SPIN_BKTLOCK	= BIT(3),
+	/** always add new item to tail */
+	CFS_HASH_ADD_TAIL	= BIT(4),
+	/** hash-table doesn't have refcount on item */
+	CFS_HASH_NO_ITEMREF	= BIT(5),
+	/** big name for param-tree */
+	CFS_HASH_BIGNAME	= BIT(6),
+	/** track global count */
+	CFS_HASH_COUNTER	= BIT(7),
+	/** rehash item by new key */
+	CFS_HASH_REHASH_KEY	= BIT(8),
+	/** Enable dynamic hash resizing */
+	CFS_HASH_REHASH		= BIT(9),
+	/** can shrink hash-size */
+	CFS_HASH_SHRINK		= BIT(10),
+	/** assert hash is empty on exit */
+	CFS_HASH_ASSERT_EMPTY	= BIT(11),
+	/** record hlist depth */
+	CFS_HASH_DEPTH		= BIT(12),
+	/**
+	 * rehash is always scheduled in a different thread, so current
+	 * change on hash table is non-blocking
+	 */
+	CFS_HASH_NBLK_CHANGE	= BIT(13),
+	/** rw semaphore lock to protect bucket */
+	CFS_HASH_RW_SEM_BKTLOCK	= BIT(14),
+	/** NB, we typed hs_flags as __u16, please change it
+	 * if you need to extend >=16 flags
+	 */
 };
 
 /** most used attributes */
@@ -245,7 +234,7 @@ struct cfs_hash {
 	/** rehash workitem */
 	struct work_struct	hs_rehash_work;
 	/** refcount on this hash table */
-	atomic_t		hs_refcount;
+	struct kref		hs_refcount;
 	/** rehash buckets-table */
 	struct cfs_hash_bucket	**hs_rehash_buckets;
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
@@ -262,8 +251,8 @@ struct cfs_hash {
 	/** workitem to output max depth */
 	struct work_struct	hs_dep_work;
 #endif
-	/** name of htable */
-	char			hs_name[0];
+	/** name of htable */
+	char			hs_name[];
 };
 
 struct cfs_hash_lock_ops {
@@ -292,7 +281,8 @@ struct cfs_hash_hlist_ops {
 
 struct cfs_hash_ops {
 	/** return hashed value from @key */
-	unsigned (*hs_hash)(struct cfs_hash *hs, const void *key, unsigned mask);
+	unsigned int (*hs_hash)(struct cfs_hash *hs, const void *key,
+				const unsigned int bits);
 	/** return key address of @hnode */
 	void *   (*hs_key)(struct hlist_node *hnode);
 	/** copy key from @hnode to @key */
@@ -360,6 +350,13 @@ cfs_hash_with_spin_bktlock(struct cfs_hash *hs)
 }
 
 static inline int
+cfs_hash_with_rw_sem_bktlock(struct cfs_hash *hs)
+{
+	/* rw sem lock to protect hash bucket */
+	return (hs->hs_flags & CFS_HASH_RW_SEM_BKTLOCK) != 0;
+}
+
+static inline int
 cfs_hash_with_add_tail(struct cfs_hash *hs)
 {
 	return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
@@ -448,10 +445,10 @@ cfs_hash_bkt_size(struct cfs_hash *hs)
 	       hs->hs_extra_bytes;
 }
 
-static inline unsigned
-cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask)
+static inline unsigned int
+cfs_hash_id(struct cfs_hash *hs, const void *key, const unsigned int bits)
 {
-	return hs->hs_ops->hs_hash(hs, key, mask);
+	return hs->hs_ops->hs_hash(hs, key, bits);
 }
 
 static inline void *
@@ -614,10 +611,10 @@ void cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
 
 static inline int
 cfs_hash_bd_dec_and_lock(struct cfs_hash *hs, struct cfs_hash_bd *bd,
-			 atomic_t *condition)
+			 refcount_t *condition)
 {
 	LASSERT(cfs_hash_with_spin_bktlock(hs));
-	return atomic_dec_and_lock(condition, &bd->bd_bucket->hsb_lock.spin);
+	return refcount_dec_and_lock(condition, &bd->bd_bucket->hsb_lock.spin);
 }
 
 static inline struct hlist_head *
@@ -808,34 +805,16 @@ void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);
  * Generic djb2 hash algorithm for character arrays.
  */
 static inline unsigned
-cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
+cfs_hash_djb2_hash(const void *key, size_t size, const unsigned int bits)
 {
-	unsigned i, hash = 5381;
-
-	LASSERT(key != NULL);
+	unsigned int i, hash = 5381;
 
-	for (i = 0; i < size; i++)
-		hash = hash * 33 + ((char *)key)[i];
+	LASSERT(key != NULL);
 
-	return (hash & mask);
-}
+	for (i = 0; i < size; i++)
+		hash = hash * 33 + ((char *)key)[i];
 
-/*
- * Generic u32 hash algorithm.
- */
-static inline unsigned
-cfs_hash_u32_hash(const __u32 key, unsigned mask)
-{
-	return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
-}
-
-/*
- * Generic u64 hash algorithm.
- */
-static inline unsigned
-cfs_hash_u64_hash(const __u64 key, unsigned mask)
-{
-	return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
+	return (hash & ((1U << bits) - 1));
 }
 
 /** iterate over all buckets in @bds (array of struct cfs_hash_bd) */
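
Dropping CFS_GOLDEN_RATIO_PRIME_32/64 together with cfs_hash_u32_hash()
and cfs_hash_u64_hash() leaves multiplicative hashing to the kernel's
<linux/hash.h>, whose hash_32()/hash_64() take the number of result bits
rather than a mask, which matches the mask-to-bits switch in hs_hash()
above. One behavioral difference: the removed helpers kept the low bits
of key * prime, while hash_32()/hash_64() return the high bits of the
product, which are better mixed. A sketch under the assumption that the
kernel helpers are the intended replacements (the call sites are not
shown by this diff; demo_u32_hash()/demo_u64_hash() are illustrative
names only):

#include <linux/hash.h>

static inline unsigned int
demo_u32_hash(u32 key, unsigned int bits)
{
	/* (key * GOLDEN_RATIO_32) >> (32 - bits) */
	return hash_32(key, bits);
}

static inline unsigned int
demo_u64_hash(u64 key, unsigned int bits)
{
	/* 64-bit multiplicative hash folded down to @bits bits */
	return hash_64(key, bits);
}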
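
The hs_refcount conversion from atomic_t to struct kref moves the
table's lifetime management onto the kernel kref API, which ties the
release callback to the final decrement instead of leaving that logic
open-coded at each put site. A minimal sketch of the get/put pattern
this type change implies; creation code would call
kref_init(&hs->hs_refcount), and demo_hash_release()/demo_hash_getref()/
demo_hash_putref() are hypothetical stand-ins for helpers that live in
the libcfs hash implementation, not in this header:

#include <linux/kref.h>
#include <linux/slab.h>

/* hypothetical release callback; the real one is not shown by this diff */
static void demo_hash_release(struct kref *kref)
{
	struct cfs_hash *hs = container_of(kref, struct cfs_hash,
					   hs_refcount);

	kfree(hs);	/* the real code also tears down buckets etc. */
}

static void demo_hash_getref(struct cfs_hash *hs)
{
	kref_get(&hs->hs_refcount);
}

static void demo_hash_putref(struct cfs_hash *hs)
{
	/* invokes demo_hash_release() when the count drops to zero */
	kref_put(&hs->hs_refcount, demo_hash_release);
}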
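
Similarly, cfs_hash_bd_dec_and_lock() now takes a refcount_t and
forwards to refcount_dec_and_lock(), which keeps the contract of
atomic_dec_and_lock() (it returns with the bucket spinlock held only
when the count reaches zero) while adding the saturation and underflow
checking of the refcount API. A hypothetical caller illustrating that
contract (demo_put_and_delete() is not a real libcfs function):

#include <linux/refcount.h>
#include <linux/spinlock.h>

/* drop a reference and, if it was the last one, unhash the item while
 * the bucket spinlock is held */
static void
demo_put_and_delete(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		    struct hlist_node *hnode, refcount_t *ref)
{
	if (cfs_hash_bd_dec_and_lock(hs, bd, ref)) {
		/* count hit zero; hsb_lock.spin is held */
		cfs_hash_bd_del_locked(hs, bd, hnode);
		spin_unlock(&bd->bd_bucket->hsb_lock.spin);
	}
	/* otherwise the count was merely decremented; no lock taken */
}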