}
/* using binary seach */
+/**
+ * __fls - find last (most-significant) set bit, via binary search.
+ *
+ * Returns a 1-based bit position: __fls(1) == 1, __fls(0x80000000) == 32,
+ * and __fls(0) == 0 (no bit set) -- same convention as the kernel fls().
+ */
+static __inline__ unsigned long __fls(long data)
+{
+        int pos = 32;
+
+        if (!data)
+                return 0;
+
+#if BITS_PER_LONG == 64
+        /* If any of the high 32 bits are set, the answer lies in the
+         * high word: shift it down so the 32-bit tests below can see
+         * it, and bias the result by 32.  (Testing the LOW word and
+         * shifting LEFT here would discard the high bits entirely.) */
+        if ((unsigned long)data >> 32) {
+                data = (long)((unsigned long)data >> 32);
+                pos += 32;
+        }
+#endif
+
+        if (!(data & 0xFFFF0000u)) {
+                data <<= 16;
+                pos -= 16;
+        }
+        if (!(data & 0xFF000000u)) {
+                data <<= 8;
+                pos -= 8;
+        }
+        if (!(data & 0xF0000000u)) {
+                data <<= 4;
+                pos -= 4;
+        }
+        if (!(data & 0xC0000000u)) {
+                data <<= 2;
+                pos -= 2;
+        }
+        if (!(data & 0x80000000u)) {
+                data <<= 1;
+                pos -= 1;
+        }
+        return pos;
+}
+
static __inline__ unsigned long __ffs(long data)
{
int pos = 0;
}
#define __ffz(x) __ffs(~(x))
+#define __flz(x) __fls(~(x))
unsigned long find_next_bit(unsigned long *addr,
unsigned long size, unsigned long offset);
#include <lustre_lib.h>
struct lustre_hash_ops;
-
+
typedef struct lustre_hash_bucket {
- /**
- * Entries list.
- */
- struct hlist_head lhb_head;
- /**
- * Current entries.
- */
- atomic_t lhb_count;
- /**
- * Lustre_hash_bucket.
- */
- rwlock_t lhb_rwlock;
+ struct hlist_head lhb_head; /* entries list */
+ atomic_t lhb_count; /* current entries */
+ rwlock_t lhb_rwlock; /* lustre_hash_bucket */
} lustre_hash_bucket_t;
+#define LUSTRE_MAX_HASH_NAME 16
+
typedef struct lustre_hash {
- /**
- * Hash name.
- */
- char *lh_name;
- /**
- * Hash name size.
- */
- unsigned int lh_name_size;
- /**
- * Current hash size.
- */
- unsigned int lh_cur_size;
- /**
- * Min hash size.
- */
- unsigned int lh_min_size;
- /**
- * Max hash size.
- */
- unsigned int lh_max_size;
- /**
- * Resize min threshold.
- */
- unsigned int lh_min_theta;
- /**
- * Resize max threshold.
- */
- unsigned int lh_max_theta;
- /**
- * Hash flags.
- */
- int lh_flags;
- /**
- * Current entries.
- */
- atomic_t lh_count;
- /**
- * Resize count.
- */
- atomic_t lh_rehash_count;
- /**
- * Hash buckets.
- */
- struct lustre_hash_bucket *lh_buckets;
- /**
- * Hash operations.
- */
- struct lustre_hash_ops *lh_ops;
- /**
- * Protects lustre_hash.
- */
- rwlock_t lh_rwlock;
+ int lh_cur_size; /* current hash size */
+ int lh_min_size; /* min hash size */
+ int lh_max_size; /* max hash size */
+ int lh_min_theta; /* resize min threshold */
+ int lh_max_theta; /* resize max threshold */
+ int lh_flags; /* hash flags */
+ atomic_t lh_count; /* current entries */
+ atomic_t lh_rehash_count;/* resize count */
+ struct lustre_hash_bucket *lh_buckets; /* hash buckets */
+ struct lustre_hash_ops *lh_ops; /* hash operations */
+ rwlock_t lh_rwlock; /* lustre_hash */
+ char lh_name[LUSTRE_MAX_HASH_NAME];
} lustre_hash_t;
typedef struct lustre_hash_ops {
void (*lh_exit)(struct hlist_node *hnode);
} lustre_hash_ops_t;
-/**
- * Enable expensive debug checks.
- */
-#define LH_DEBUG 0x0001
-/**
- * Enable dynamic hash resizing.
- */
-#define LH_REHASH 0x0002
+#define LH_DEBUG 0x0001 /* Enable expensive debug checks */
+#define LH_REHASH 0x0002 /* Enable dynamic hash resizing */
#define LHO(lh) (lh)->lh_ops
#define LHP(lh, op) (lh)->lh_ops->lh_ ## op
lh_hash(lustre_hash_t *lh, void *key, unsigned mask)
{
LASSERT(lh);
+ LASSERT(LHO(lh));
- if (LHO(lh) && LHP(lh, hash))
+ if (LHP(lh, hash))
return LHP(lh, hash)(lh, key, mask);
return -EOPNOTSUPP;
{
LASSERT(lh);
LASSERT(hnode);
+ LASSERT(LHO(lh));
- if (LHO(lh) && LHP(lh, key))
+ if (LHP(lh, key))
return LHP(lh, key)(hnode);
return NULL;
}
-/**
+/**
* Returns 1 on a match,
* XXX: This would be better if it returned, -1, 0, or 1 for
* <, =, > respectivly. It could then be used to implement
{
LASSERT(lh);
LASSERT(hnode);
+ LASSERT(LHO(lh));
- if (LHO(lh) && LHP(lh, compare))
+ if (LHP(lh, compare))
return LHP(lh, compare)(key, hnode);
return -EOPNOTSUPP;
{
LASSERT(lh);
LASSERT(hnode);
+ LASSERT(LHO(lh));
- if (LHO(lh) && LHP(lh, get))
+ if (LHP(lh, get))
return LHP(lh, get)(hnode);
return NULL;
{
LASSERT(lh);
LASSERT(hnode);
+ LASSERT(LHO(lh));
- if (LHO(lh) && LHP(lh, put))
+ if (LHP(lh, put))
return LHP(lh, put)(hnode);
return NULL;
{
LASSERT(lh);
LASSERT(hnode);
+ LASSERT(LHO(lh));
- if (LHO(lh) && LHP(lh, exit))
+ if (LHP(lh, exit))
return LHP(lh, exit)(hnode);
}
LASSERT(lh_compare(lh, key, hnode));
}
-/*
+/**
* Validate hnode is in the correct bucket.
*/
static inline void
if (unlikely(lh->lh_flags & LH_DEBUG)) {
i = lh_hash(lh, lh_key(lh, hnode), lh->lh_cur_size - 1);
LASSERT(&lh->lh_buckets[i] == lhb);
- }
- }
-
+ }
+}
+
static inline struct hlist_node *
__lustre_hash_bucket_lookup(lustre_hash_t *lh,
lustre_hash_bucket_t *lhb, void *key)
struct hlist_node *hnode)
{
hlist_del_init(hnode);
+ LASSERT(atomic_read(&lhb->lhb_count) > 0);
atomic_dec(&lhb->lhb_count);
+ LASSERT(atomic_read(&lh->lh_count) > 0);
atomic_dec(&lh->lh_count);
return lh_put(lh, hnode);
struct hlist_node *hnode);
/*
- * Hash deletion functions.
+ * Hash deletion functions.
*/
void *lustre_hash_del(lustre_hash_t *lh, void *key, struct hlist_node *hnode);
void *lustre_hash_del_key(lustre_hash_t *lh, void *key);
lh_for_each_cb, void *data);
/*
- * Rehash - theta is calculated to be the average chained
+ * Rehash - Theta is calculated to be the average chained
* hash depth assuming a perfectly uniform hash funcion.
*/
int lustre_hash_rehash(lustre_hash_t *lh, int size);
int lustre_hash_debug_header(char *str, int size);
int lustre_hash_debug_str(lustre_hash_t *lh, char *str, int size);
-
-/**
+/*
* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1
*/
-#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
-/**
+#define CFS_GOLDEN_RATIO_PRIME_32 0x9e370001UL
+/*
* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1
*/
-#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
-
+#define CFS_GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
/**
* Generic djb2 hash algorithm for character arrays.
for (i = 0; i < size; i++)
hash = hash * 33 + ((char *)key)[i];
- RETURN(hash & mask);
+ return (hash & mask);
}
/**
static inline unsigned
lh_u32_hash(__u32 key, unsigned mask)
{
- RETURN((key * GOLDEN_RATIO_PRIME_32) & mask);
+ return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
}
/**
static inline unsigned
lh_u64_hash(__u64 key, unsigned mask)
{
- RETURN((unsigned)(key * GOLDEN_RATIO_PRIME_64) & mask);
+ return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
}
#define lh_for_each_bucket(lh, lhb, pos) \
#define might_sleep_if(c)
#define smp_mb()
-/**
- * fls - find last (most-significant) bit set
- * @x: the word to search
- *
- * This is defined the same way as ffs.
- * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
- */
-static inline
-int fls(int x)
-{
- int r = 32;
-
- if (!x)
- return 0;
- if (!(x & 0xffff0000u)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xff000000u)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xf0000000u)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xc0000000u)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000u)) {
- x <<= 1;
- r -= 1;
- }
- return r;
-}
-
static inline
int test_and_set_bit(int nr, unsigned long *addr)
{
}
#endif
+/* Using kernel fls(). Userspace will use one defined in user-bitops.h. */
+#ifndef __fls
+#define __fls fls
+#endif
+
#endif /* __KERNEL__ */
#endif /* _COMPAT25_H */
if (!lh)
RETURN(NULL);
- lh->lh_name_size = strlen(name) + 1;
- rwlock_init(&lh->lh_rwlock);
-
- OBD_ALLOC(lh->lh_name, lh->lh_name_size);
- if (!lh->lh_name) {
- OBD_FREE_PTR(lh);
- RETURN(NULL);
- }
-
- strncpy(lh->lh_name, name, lh->lh_name_size);
-
- atomic_set(&lh->lh_count, 0);
+        strncpy(lh->lh_name, name, sizeof(lh->lh_name));
+        /* strncpy() leaves lh_name unterminated when strlen(name) >=
+         * sizeof(lh->lh_name); terminate explicitly (CERT STR32-C). */
+        lh->lh_name[sizeof(lh->lh_name) - 1] = '\0';
atomic_set(&lh->lh_rehash_count, 0);
+ atomic_set(&lh->lh_count, 0);
+ rwlock_init(&lh->lh_rwlock);
lh->lh_cur_size = cur_size;
lh->lh_min_size = cur_size;
lh->lh_max_size = max_size;
OBD_VMALLOC(lh->lh_buckets, sizeof(*lh->lh_buckets) * lh->lh_cur_size);
if (!lh->lh_buckets) {
- OBD_FREE(lh->lh_name, lh->lh_name_size);
OBD_FREE_PTR(lh);
RETURN(NULL);
}
}
OBD_VFREE(lh->lh_buckets, sizeof(*lh->lh_buckets) * lh->lh_cur_size);
- OBD_FREE(lh->lh_name, lh->lh_name_size);
-
LASSERT(atomic_read(&lh->lh_count) == 0);
write_unlock(&lh->lh_rwlock);
EXIT;
}
EXPORT_SYMBOL(lustre_hash_add);
-
-/**
- * Add item @hnode to lustre hash @lh using @key. The registered
- * ops->lh_get function will be called if the item was added.
- * Returns 0 on success or -EALREADY on key collisions.
- */
-int
-lustre_hash_add_unique(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
+
+static struct hlist_node *
+lustre_hash_findadd_unique_hnode(lustre_hash_t *lh, void *key,
+ struct hlist_node *hnode)
{
+ struct hlist_node *ehnode;
lustre_hash_bucket_t *lhb;
int size;
- int rc = -EALREADY;
unsigned i;
ENTRY;
lhb = &lh->lh_buckets[i];
LASSERT(i < lh->lh_cur_size);
LASSERT(hlist_unhashed(hnode));
-
+
write_lock(&lhb->lhb_rwlock);
- if (!__lustre_hash_bucket_lookup(lh, lhb, key)) {
+ ehnode = __lustre_hash_bucket_lookup(lh, lhb, key);
+ if (ehnode) {
+ lh_get(lh, ehnode);
+ } else {
__lustre_hash_bucket_add(lh, lhb, hnode);
- rc = 0;
+ ehnode = hnode;
}
write_unlock(&lhb->lhb_rwlock);
-
+
size = lustre_hash_rehash_size(lh);
read_unlock(&lh->lh_rwlock);
if (size)
lustre_hash_rehash(lh, size);
- RETURN(rc);
+ RETURN(ehnode);
+}
+
+/**
+ * Add item @hnode to lustre hash @lh using @key.  The registered
+ * ops->lh_get function will be called if the item was added.
+ * Returns 0 on success or -EALREADY on key collisions.
+ */
+int
+lustre_hash_add_unique(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
+{
+        struct hlist_node *ehnode;
+        ENTRY;
+
+        ehnode = lustre_hash_findadd_unique_hnode(lh, key, hnode);
+        if (ehnode != hnode) {
+                /* findadd took an lh_get() reference on the existing
+                 * entry; we do not hand that entry back to the caller,
+                 * so drop the reference here to avoid leaking it. */
+                lh_put(lh, ehnode);
+                RETURN(-EALREADY);
+        }
+
+        RETURN(0);
+}
EXPORT_SYMBOL(lustre_hash_add_unique);
lustre_hash_findadd_unique(lustre_hash_t *lh, void *key,
struct hlist_node *hnode)
{
- struct hlist_node *existing_hnode;
- lustre_hash_bucket_t *lhb;
- int size;
- unsigned i;
+ struct hlist_node *ehnode;
void *obj;
ENTRY;
-
- __lustre_hash_key_validate(lh, key, hnode);
-
- read_lock(&lh->lh_rwlock);
- i = lh_hash(lh, key, lh->lh_cur_size - 1);
- lhb = &lh->lh_buckets[i];
- LASSERT(i < lh->lh_cur_size);
- LASSERT(hlist_unhashed(hnode));
-
- write_lock(&lhb->lhb_rwlock);
- existing_hnode = __lustre_hash_bucket_lookup(lh, lhb, key);
- if (existing_hnode)
- obj = lh_get(lh, existing_hnode);
- else
- obj = __lustre_hash_bucket_add(lh, lhb, hnode);
- write_unlock(&lhb->lhb_rwlock);
-
- size = lustre_hash_rehash_size(lh);
- read_unlock(&lh->lh_rwlock);
- if (size)
- lustre_hash_rehash(lh, size);
-
+
+ ehnode = lustre_hash_findadd_unique_hnode(lh, key, hnode);
+ obj = lh_get(lh, ehnode);
+ lh_put(lh, ehnode);
RETURN(obj);
}
EXPORT_SYMBOL(lustre_hash_findadd_unique);
* Non-Uniform hash distribution: 128/125/0/0/0/0/2/1
*/
lh_for_each_bucket(lh, lhb, i)
- dist[MIN(fls(atomic_read(&lhb->lhb_count)/MAX(theta,1)),7)]++;
+ dist[MIN(__fls(atomic_read(&lhb->lhb_count)/MAX(theta,1)),7)]++;
for (i = 0; i < 8; i++)
c += snprintf(str + c, size - c, "%d%c", dist[i],