*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* libcfs/include/libcfs/libcfs_hash.h
*
#ifndef __LIBCFS_HASH_H__
#define __LIBCFS_HASH_H__
-#include <linux/hash.h>
-
-/*
- * Knuth recommends primes in approximately golden ratio to the maximum
- * integer representable by a machine word for multiplicative hashing.
- * Chuck Lever verified the effectiveness of this technique:
- * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
- *
- * These primes are chosen to be bit-sparse, that is operations on
- * them can use shifts and additions instead of multiplications for
- * machines where multiplications are slow.
- */
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define CFS_GOLDEN_RATIO_PRIME_32 0x9e370001UL
-/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define CFS_GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/refcount.h>
+#include <libcfs/linux/linux-hash.h>
/** disable debug */
#define CFS_HASH_DEBUG_NONE 0
union cfs_hash_lock {
rwlock_t rw; /**< rwlock */
spinlock_t spin; /**< spinlock */
+ struct rw_semaphore rw_sem; /**< rw semaphore; unlike the other two, may be held across sleeping operations */
};
/**
* common hash attributes.
*/
enum cfs_hash_tag {
- /**
- * don't need any lock, caller will protect operations with it's
- * own lock. With this flag:
- * . CFS_HASH_NO_BKTLOCK, CFS_HASH_RW_BKTLOCK, CFS_HASH_SPIN_BKTLOCK
- * will be ignored.
- * . Some functions will be disabled with this flag, i.e:
- * cfs_hash_for_each_empty, cfs_hash_rehash
- */
- CFS_HASH_NO_LOCK = 1 << 0,
- /** no bucket lock, use one spinlock to protect the whole hash */
- CFS_HASH_NO_BKTLOCK = 1 << 1,
- /** rwlock to protect bucket */
- CFS_HASH_RW_BKTLOCK = 1 << 2,
+ /**
+ * don't need any lock, caller will protect operations with it's
+ * own lock. With this flag:
+ * . CFS_HASH_NO_BKTLOCK, CFS_HASH_RW_BKTLOCK, CFS_HASH_SPIN_BKTLOCK
+ * will be ignored.
+ * . Some functions will be disabled with this flag, i.e:
+ * cfs_hash_for_each_empty, cfs_hash_rehash
+ */
+ CFS_HASH_NO_LOCK = BIT(0),
+ /** no bucket lock, use one spinlock to protect the whole hash */
+ CFS_HASH_NO_BKTLOCK = BIT(1),
+ /** rwlock to protect bucket */
+ CFS_HASH_RW_BKTLOCK = BIT(2),
/** spinlock to protect bucket */
- CFS_HASH_SPIN_BKTLOCK = 1 << 3,
- /** always add new item to tail */
- CFS_HASH_ADD_TAIL = 1 << 4,
- /** hash-table doesn't have refcount on item */
- CFS_HASH_NO_ITEMREF = 1 << 5,
- /** big name for param-tree */
- CFS_HASH_BIGNAME = 1 << 6,
- /** track global count */
- CFS_HASH_COUNTER = 1 << 7,
- /** rehash item by new key */
- CFS_HASH_REHASH_KEY = 1 << 8,
- /** Enable dynamic hash resizing */
- CFS_HASH_REHASH = 1 << 9,
- /** can shrink hash-size */
- CFS_HASH_SHRINK = 1 << 10,
- /** assert hash is empty on exit */
- CFS_HASH_ASSERT_EMPTY = 1 << 11,
- /** record hlist depth */
- CFS_HASH_DEPTH = 1 << 12,
- /**
- * rehash is always scheduled in a different thread, so current
- * change on hash table is non-blocking
- */
- CFS_HASH_NBLK_CHANGE = 1 << 13,
- /** NB, we typed hs_flags as __u16, please change it
- * if you need to extend >=16 flags */
+ CFS_HASH_SPIN_BKTLOCK = BIT(3),
+ /** always add new item to tail */
+ CFS_HASH_ADD_TAIL = BIT(4),
+ /** hash-table doesn't have refcount on item */
+ CFS_HASH_NO_ITEMREF = BIT(5),
+ /** big name for param-tree */
+ CFS_HASH_BIGNAME = BIT(6),
+ /** track global count */
+ CFS_HASH_COUNTER = BIT(7),
+ /** rehash item by new key */
+ CFS_HASH_REHASH_KEY = BIT(8),
+ /** Enable dynamic hash resizing */
+ CFS_HASH_REHASH = BIT(9),
+ /** can shrink hash-size */
+ CFS_HASH_SHRINK = BIT(10),
+ /** assert hash is empty on exit */
+ CFS_HASH_ASSERT_EMPTY = BIT(11),
+ /** record hlist depth */
+ CFS_HASH_DEPTH = BIT(12),
+ /**
+ * rehash is always scheduled in a different thread, so current
+ * change on hash table is non-blocking
+ */
+ CFS_HASH_NBLK_CHANGE = BIT(13),
+ /** rw semaphore to protect bucket; sleepable, unlike the rwlock/spinlock variants */
+ CFS_HASH_RW_SEM_BKTLOCK = BIT(14),
+ /** NB, we typed hs_flags as __u16, please change it
+ * if you need to extend >=16 flags
+ */
};
/** most used attributes */
/** # of iterators (caller of cfs_hash_for_each_*) */
__u32 hs_iterators;
/** rehash workitem */
- struct cfs_workitem hs_rehash_wi;
+ struct work_struct hs_rehash_work;
/** refcount on this hash table */
- atomic_t hs_refcount;
+ struct kref hs_refcount;
/** rehash buckets-table */
struct cfs_hash_bucket **hs_rehash_buckets;
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
/** bits when we found the max depth */
unsigned int hs_dep_bits;
/** workitem to output max depth */
- struct cfs_workitem hs_dep_wi;
+ struct work_struct hs_dep_work;
#endif
- /** name of htable */
- char hs_name[0];
+ /** name of htable */
+ char hs_name[];
};
struct cfs_hash_lock_ops {
struct cfs_hash_ops {
/** return hashed value from @key */
- unsigned (*hs_hash)(struct cfs_hash *hs, const void *key, unsigned mask);
+ unsigned int (*hs_hash)(struct cfs_hash *hs, const void *key,
+ const unsigned int bits);
/** return key address of @hnode */
void * (*hs_key)(struct hlist_node *hnode);
/** copy key from @hnode to @key */
}
static inline int
+cfs_hash_with_rw_sem_bktlock(struct cfs_hash *hs)
+{
+ /* true iff buckets are protected by rw semaphores (CFS_HASH_RW_SEM_BKTLOCK set) */
+ return (hs->hs_flags & CFS_HASH_RW_SEM_BKTLOCK) != 0;
+}
+
+static inline int
cfs_hash_with_add_tail(struct cfs_hash *hs)
{
return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
hs->hs_extra_bytes;
}
-static inline unsigned
-cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask)
+static inline unsigned int
+cfs_hash_id(struct cfs_hash *hs, const void *key, const unsigned int bits)
{
- return hs->hs_ops->hs_hash(hs, key, mask);
+ /* delegate to the per-table hash callback; @bits selects a 2^bits-sized
+ * bucket range (cf. the (1U << bits) - 1 mask in cfs_hash_djb2_hash),
+ * replacing the old pre-computed @mask argument
+ */
+ return hs->hs_ops->hs_hash(hs, key, bits);
}
static inline void *
static inline int
cfs_hash_bd_dec_and_lock(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- atomic_t *condition)
+ refcount_t *condition)
{
LASSERT(cfs_hash_with_spin_bktlock(hs));
+ /* drop @condition; returns true with the bucket spinlock held iff the
+ * refcount reached zero (refcount_dec_and_lock contract)
+ */
- return atomic_dec_and_lock(condition, &bd->bd_bucket->hsb_lock.spin);
+ return refcount_dec_and_lock(condition, &bd->bd_bucket->hsb_lock.spin);
}
static inline struct hlist_head *
*/
void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs);
void cfs_hash_rehash_cancel(struct cfs_hash *hs);
-int cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
+void cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
void *new_key, struct hlist_node *hnode);
/* Generic debug formatting routines mainly for proc handler */
struct seq_file;
-int cfs_hash_debug_header(struct seq_file *m);
-int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);
+void cfs_hash_debug_header(struct seq_file *m);
+void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);
/*
* Generic djb2 hash algorithm for character arrays.
*/
static inline unsigned
-cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
+cfs_hash_djb2_hash(const void *key, size_t size, const unsigned int bits)
{
- unsigned i, hash = 5381;
-
- LASSERT(key != NULL);
+ unsigned int i, hash = 5381;
- for (i = 0; i < size; i++)
- hash = hash * 33 + ((char *)key)[i];
+ LASSERT(key != NULL);
- return (hash & mask);
-}
+ for (i = 0; i < size; i++)
+ hash = hash * 33 + ((char *)key)[i];
-/*
- * Generic u32 hash algorithm.
- */
-static inline unsigned
-cfs_hash_u32_hash(const __u32 key, unsigned mask)
-{
- return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
-}
-
-/*
- * Generic u64 hash algorithm.
- */
-static inline unsigned
-cfs_hash_u64_hash(const __u64 key, unsigned mask)
-{
- return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
+ /* fold the djb2 value into [0, 2^bits): table size is a power of two,
+ * so the low @bits bits replace the caller-supplied @mask of old
+ */
+ return (hash & ((1U << bits) - 1));
}
/** iterate over all buckets in @bds (array of struct cfs_hash_bd) */