-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
struct cfs_hash_hlist_ops;
typedef union {
- cfs_rwlock_t rw; /**< rwlock */
- cfs_spinlock_t spin; /**< spinlock */
+ rwlock_t rw; /**< rwlock */
+ spinlock_t spin; /**< spinlock */
} cfs_hash_lock_t;
/**
* - some extra bytes (caller can require it while creating hash)
*/
typedef struct cfs_hash_bucket {
- cfs_hash_lock_t hsb_lock; /**< bucket lock */
- __u32 hsb_count; /**< current entries */
- __u32 hsb_version; /**< change version */
- unsigned int hsb_index; /**< index of bucket */
- int hsb_depmax; /**< max depth on bucket */
- char hsb_head[0]; /**< hash-head array */
+ cfs_hash_lock_t hsb_lock; /**< bucket lock */
+ __u32 hsb_count; /**< current entries */
+ __u32 hsb_version; /**< change version */
+ unsigned int hsb_index; /**< index of bucket */
+ int hsb_depmax; /**< max depth on bucket */
+ long hsb_head[0]; /**< hash-head array */
} cfs_hash_bucket_t;
/**
/** hash list operations */
struct cfs_hash_hlist_ops *hs_hops;
/** hash buckets-table */
- cfs_hash_bucket_t **hs_buckets;
- /** total number of items on this hash-table */
- cfs_atomic_t hs_count;
- /** hash flags, see cfs_hash_tag for detail */
- __u16 hs_flags;
- /** # of extra-bytes for bucket, for user saving extended attributes */
+ cfs_hash_bucket_t **hs_buckets;
+ /** total number of items on this hash-table */
+ atomic_t hs_count;
+ /** hash flags, see cfs_hash_tag for detail */
+ __u16 hs_flags;
+ /** # of extra-bytes per bucket, for users to save extended attributes */
__u16 hs_extra_bytes;
/** wants to iterate */
__u8 hs_iterating;
__u32 hs_rehash_count;
/** # of iterators (caller of cfs_hash_for_each_*) */
__u32 hs_iterators;
- /** rehash workitem */
- cfs_workitem_t hs_rehash_wi;
- /** refcount on this hash table */
- cfs_atomic_t hs_refcount;
- /** rehash buckets-table */
- cfs_hash_bucket_t **hs_rehash_buckets;
+ /** rehash workitem */
+ cfs_workitem_t hs_rehash_wi;
+ /** refcount on this hash table */
+ atomic_t hs_refcount;
+ /** rehash buckets-table */
+ cfs_hash_bucket_t **hs_rehash_buckets;
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
/** serialize debug members */
- cfs_spinlock_t hs_dep_lock;
+ spinlock_t hs_dep_lock;
/** max depth */
unsigned int hs_dep_max;
/** id of the deepest bucket */
typedef struct cfs_hash_ops {
/** return hashed value from @key */
- unsigned (*hs_hash)(cfs_hash_t *hs, void *key, unsigned mask);
+ unsigned (*hs_hash)(cfs_hash_t *hs, const void *key, unsigned mask);
/** return key address of @hnode */
void * (*hs_key)(cfs_hlist_node_t *hnode);
/** copy key from @hnode to @key */
void (*hs_keycpy)(cfs_hlist_node_t *hnode, void *key);
- /** compare @key with key of @hnode */
- int (*hs_keycmp)(void *key, cfs_hlist_node_t *hnode);
+ /**
+ * compare @key with key of @hnode
+ * Returns 1 on a match.
+ */
+ int (*hs_keycmp)(const void *key, cfs_hlist_node_t *hnode);
/** return object address of @hnode, i.e: container_of(...hnode) */
void * (*hs_object)(cfs_hlist_node_t *hnode);
/** get refcount of item, always called with holding bucket-lock */
- void * (*hs_get)(cfs_hlist_node_t *hnode);
+ void (*hs_get)(cfs_hash_t *hs, cfs_hlist_node_t *hnode);
/** release refcount of item */
- void * (*hs_put)(cfs_hlist_node_t *hnode);
+ void (*hs_put)(cfs_hash_t *hs, cfs_hlist_node_t *hnode);
/** release refcount of item, always called with holding bucket-lock */
- void * (*hs_put_locked)(cfs_hlist_node_t *hnode);
+ void (*hs_put_locked)(cfs_hash_t *hs, cfs_hlist_node_t *hnode);
/** it's called before removing of @hnode */
- void (*hs_exit)(cfs_hlist_node_t *hnode);
+ void (*hs_exit)(cfs_hash_t *hs, cfs_hlist_node_t *hnode);
} cfs_hash_ops_t;
/** total number of buckets in @hs */
#define CFS_HOP(hs, op) (hs)->hs_ops->hs_ ## op
static inline unsigned
-cfs_hash_id(cfs_hash_t *hs, void *key, unsigned mask)
+cfs_hash_id(cfs_hash_t *hs, const void *key, unsigned mask)
{
return CFS_HOP(hs, hash)(hs, key, mask);
}
* Returns 1 on a match,
*/
static inline int
-cfs_hash_keycmp(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode)
+cfs_hash_keycmp(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode)
{
return CFS_HOP(hs, keycmp)(key, hnode);
}
return CFS_HOP(hs, object)(hnode);
}
-static inline void *
+static inline void
cfs_hash_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
- return CFS_HOP(hs, get)(hnode);
+ return CFS_HOP(hs, get)(hs, hnode);
}
-static inline void *
+static inline void
cfs_hash_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
LASSERT(CFS_HOP(hs, put_locked) != NULL);
- return CFS_HOP(hs, put_locked)(hnode);
+ return CFS_HOP(hs, put_locked)(hs, hnode);
}
-static inline void *
+static inline void
cfs_hash_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
LASSERT(CFS_HOP(hs, put) != NULL);
- return CFS_HOP(hs, put)(hnode);
+ return CFS_HOP(hs, put)(hs, hnode);
}
static inline void
cfs_hash_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
if (CFS_HOP(hs, exit))
- CFS_HOP(hs, exit)(hnode);
+ CFS_HOP(hs, exit)(hs, hnode);
}
static inline void cfs_hash_lock(cfs_hash_t *hs, int excl)
hs->hs_lops->hs_unlock(&hs->hs_lock, excl);
}
+static inline int cfs_hash_dec_and_lock(cfs_hash_t *hs,
+ atomic_t *condition)
+{
+ LASSERT(cfs_hash_with_no_bktlock(hs));
+ return atomic_dec_and_lock(condition, &hs->hs_lock.spin);
+}
+
static inline void cfs_hash_bd_lock(cfs_hash_t *hs,
cfs_hash_bd_t *bd, int excl)
{
* operations on cfs_hash bucket (bd: bucket descriptor),
* they are normally for hash-table without rehash
*/
-void cfs_hash_bd_get(cfs_hash_t *hs, void *key, cfs_hash_bd_t *bd);
+void cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd);
-static inline void cfs_hash_bd_get_and_lock(cfs_hash_t *hs, void *key,
+static inline void cfs_hash_bd_get_and_lock(cfs_hash_t *hs, const void *key,
cfs_hash_bd_t *bd, int excl)
{
cfs_hash_bd_get(hs, key, bd);
cfs_hash_bd_lock(hs, bd, excl);
}
+static inline unsigned cfs_hash_bd_index_get(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+{
+ return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits);
+}
+
+static inline void cfs_hash_bd_index_set(cfs_hash_t *hs,
+ unsigned index, cfs_hash_bd_t *bd)
+{
+ bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
+ bd->bd_offset = index & (CFS_HASH_BKT_NHLIST(hs) - 1U);
+}
+
static inline void *
cfs_hash_bd_extra_get(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
cfs_hash_bd_t *bd_new, cfs_hlist_node_t *hnode);
static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_atomic_t *condition)
+ atomic_t *condition)
{
- LASSERT(cfs_hash_with_spin_bktlock(hs));
- return cfs_atomic_dec_and_lock(condition,
- &bd->bd_bucket->hsb_lock.spin);
+ LASSERT(cfs_hash_with_spin_bktlock(hs));
+ return atomic_dec_and_lock(condition, &bd->bd_bucket->hsb_lock.spin);
}
static inline cfs_hlist_head_t *cfs_hash_bd_hhead(cfs_hash_t *hs,
}
cfs_hlist_node_t *cfs_hash_bd_lookup_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, void *key);
+ cfs_hash_bd_t *bd, const void *key);
+cfs_hlist_node_t *cfs_hash_bd_peek_locked(cfs_hash_t *hs,
+ cfs_hash_bd_t *bd, const void *key);
cfs_hlist_node_t *cfs_hash_bd_findadd_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, void *key,
+ cfs_hash_bd_t *bd, const void *key,
cfs_hlist_node_t *hnode,
int insist_add);
cfs_hlist_node_t *cfs_hash_bd_finddel_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, void *key,
+ cfs_hash_bd_t *bd, const void *key,
cfs_hlist_node_t *hnode);
/**
* operations on cfs_hash bucket (bd: bucket descriptor),
* they are safe for hash-table with rehash
*/
-void cfs_hash_dual_bd_get(cfs_hash_t *hs, void *key, cfs_hash_bd_t *bds);
+void cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds);
void cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl);
void cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl);
-static inline void cfs_hash_dual_bd_get_and_lock(cfs_hash_t *hs, void *key,
+static inline void cfs_hash_dual_bd_get_and_lock(cfs_hash_t *hs, const void *key,
cfs_hash_bd_t *bds, int excl)
{
cfs_hash_dual_bd_get(hs, key, bds);
}
cfs_hlist_node_t *cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bds, void *key);
+ cfs_hash_bd_t *bds,
+ const void *key);
cfs_hlist_node_t *cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bds, void *key,
+ cfs_hash_bd_t *bds,
+ const void *key,
cfs_hlist_node_t *hnode,
int insist_add);
cfs_hlist_node_t *cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bds, void *key,
+ cfs_hash_bd_t *bds,
+ const void *key,
cfs_hlist_node_t *hnode);
/* Hash init/cleanup functions */
void cfs_hash_putref(cfs_hash_t *hs);
/* Hash addition functions */
-void cfs_hash_add(cfs_hash_t *hs, void *key,
+void cfs_hash_add(cfs_hash_t *hs, const void *key,
cfs_hlist_node_t *hnode);
-int cfs_hash_add_unique(cfs_hash_t *hs, void *key,
+int cfs_hash_add_unique(cfs_hash_t *hs, const void *key,
cfs_hlist_node_t *hnode);
-void *cfs_hash_findadd_unique(cfs_hash_t *hs, void *key,
+void *cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
cfs_hlist_node_t *hnode);
/* Hash deletion functions */
-void *cfs_hash_del(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode);
-void *cfs_hash_del_key(cfs_hash_t *hs, void *key);
+void *cfs_hash_del(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode);
+void *cfs_hash_del_key(cfs_hash_t *hs, const void *key);
/* Hash lookup/for_each functions */
#define CFS_HASH_LOOP_HOG 1024
typedef int (*cfs_hash_for_each_cb_t)(cfs_hash_t *hs, cfs_hash_bd_t *bd,
cfs_hlist_node_t *node, void *data);
-void *cfs_hash_lookup(cfs_hash_t *hs, void *key);
+void *cfs_hash_lookup(cfs_hash_t *hs, const void *key);
void cfs_hash_for_each(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
void cfs_hash_for_each_safe(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
int cfs_hash_for_each_nolock(cfs_hash_t *hs,
cfs_hash_for_each_cb_t, void *data);
int cfs_hash_for_each_empty(cfs_hash_t *hs,
cfs_hash_for_each_cb_t, void *data);
-void cfs_hash_for_each_key(cfs_hash_t *hs, void *key,
+void cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
cfs_hash_for_each_cb_t, void *data);
typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
void cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t, void *data);
void cfs_hash_rehash_cancel_locked(cfs_hash_t *hs);
void cfs_hash_rehash_cancel(cfs_hash_t *hs);
int cfs_hash_rehash(cfs_hash_t *hs, int do_rehash);
-void cfs_hash_rehash_key(cfs_hash_t *hs, void *old_key,
+void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
void *new_key, cfs_hlist_node_t *hnode);
#if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1
/* Validate hnode references the correct key */
static inline void
-cfs_hash_key_validate(cfs_hash_t *hs, void *key,
+cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
cfs_hlist_node_t *hnode)
{
LASSERT(cfs_hash_keycmp(hs, key, hnode));
#else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */
static inline void
-cfs_hash_key_validate(cfs_hash_t *hs, void *key,
+cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
cfs_hlist_node_t *hnode) {}
static inline void
static inline int __cfs_hash_theta(cfs_hash_t *hs)
{
- return (cfs_atomic_read(&hs->hs_count) <<
- CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
+ return (atomic_read(&hs->hs_count) <<
+ CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
}
static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
* Generic djb2 hash algorithm for character arrays.
*/
static inline unsigned
-cfs_hash_djb2_hash(void *key, size_t size, unsigned mask)
+cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
{
unsigned i, hash = 5381;
* Generic u32 hash algorithm.
*/
static inline unsigned
-cfs_hash_u32_hash(__u32 key, unsigned mask)
+cfs_hash_u32_hash(const __u32 key, unsigned mask)
{
return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
}
* Generic u64 hash algorithm.
*/
static inline unsigned
-cfs_hash_u64_hash(__u64 key, unsigned mask)
+cfs_hash_u64_hash(const __u64 key, unsigned mask)
{
return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
}