#include <libcfs/libcfs_time.h>
#include <libcfs/libcfs_string.h>
#include <libcfs/libcfs_kernelcomm.h>
+#include <libcfs/libcfs_hash.h>
/* container_of depends on "likely" which is defined in libcfs_private.h */
static inline void *__container_of(void *ptr, unsigned long shift)
#ifndef __LIBCFS_HASH_H__
#define __LIBCFS_HASH_H__
+/*
+ * Knuth recommends primes in approximately golden ratio to the maximum
+ * integer representable by a machine word for multiplicative hashing.
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * These primes are chosen to be bit-sparse, that is operations on
+ * them can use shifts and additions instead of multiplications for
+ * machines where multiplications are slow.
+ */
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define CFS_GOLDEN_RATIO_PRIME_32 0x9e370001UL
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define CFS_GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
/*
* Ideally we would use HAVE_HASH_LONG for this, but on linux we configure
/* Fast hashing routine for a long.
(C) 2002 William Lee Irwin III, IBM */
-/*
- * Knuth recommends primes in approximately golden ratio to the maximum
- * integer representable by a machine word for multiplicative hashing.
- * Chuck Lever verified the effectiveness of this technique:
- * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
- *
- * These primes are chosen to be bit-sparse, that is operations on
- * them can use shifts and additions instead of multiplications for
- * machines where multiplications are slow.
- */
#if BITS_PER_LONG == 32
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e370001UL
+#define CFS_GOLDEN_RATIO_PRIME CFS_GOLDEN_RATIO_PRIME_32
#elif BITS_PER_LONG == 64
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
+#define CFS_GOLDEN_RATIO_PRIME CFS_GOLDEN_RATIO_PRIME_64
#else
-#error Define GOLDEN_RATIO_PRIME for your wordsize.
+#error Define CFS_GOLDEN_RATIO_PRIME for your wordsize.
#endif
static inline unsigned long hash_long(unsigned long val, unsigned int bits)
hash += n;
#else
/* On some cpus multiply is faster, on others gcc will do shifts */
- hash *= GOLDEN_RATIO_PRIME;
+ hash *= CFS_GOLDEN_RATIO_PRIME;
#endif
/* High bits are more random, so use them. */
/* !(__linux__ && __KERNEL__) */
#endif
+struct cfs_hash_ops;
+
+typedef struct cfs_hash_bucket {
+ struct hlist_head hsb_head; /* entries list */
+ atomic_t hsb_count; /* current entries */
+ rwlock_t hsb_rwlock; /* cfs_hash_bucket */
+} cfs_hash_bucket_t;
+
+#define CFS_MAX_HASH_NAME 16
+
+typedef struct cfs_hash {
+ int hs_cur_bits; /* current hash bits */
+ int hs_cur_mask; /* current hash mask */
+ int hs_min_bits; /* min hash bits */
+ int hs_max_bits; /* max hash bits */
+ int hs_min_theta; /* resize min threshold */
+ int hs_max_theta; /* resize max threshold */
+ int hs_flags; /* hash flags */
+ atomic_t hs_count; /* current entries */
+ atomic_t hs_rehash_count;/* resize count */
+ struct cfs_hash_bucket **hs_buckets; /* hash buckets */
+ struct cfs_hash_ops *hs_ops; /* hash operations */
+ rwlock_t hs_rwlock; /* cfs_hash */
+ char hs_name[CFS_MAX_HASH_NAME];
+} cfs_hash_t;
+
+typedef struct cfs_hash_ops {
+ unsigned (*hs_hash)(cfs_hash_t *hs, void *key, unsigned mask);
+ void * (*hs_key)(struct hlist_node *hnode);
+ int (*hs_compare)(void *key, struct hlist_node *hnode);
+ void * (*hs_get)(struct hlist_node *hnode);
+ void * (*hs_put)(struct hlist_node *hnode);
+ void (*hs_exit)(struct hlist_node *hnode);
+} cfs_hash_ops_t;
+
+#define CFS_HASH_DEBUG 0x0001 /* Enable expensive debug checks */
+#define CFS_HASH_REHASH 0x0002 /* Enable dynamic hash resizing */
+
+#define CFS_HO(hs) (hs)->hs_ops
+#define CFS_HOP(hs, op) (hs)->hs_ops->hs_ ## op
+
+/* Map @key to a bucket index by dispatching to the mandatory
+ * ops->hs_hash callback and masking the result with @mask. */
+static inline unsigned
+cfs_hash_id(cfs_hash_t *hs, void *key, unsigned mask)
+{
+        LASSERT(hs);
+        LASSERT(CFS_HO(hs));
+        LASSERT(CFS_HOP(hs, hash));
+
+        return CFS_HOP(hs, hash)(hs, key, mask);
+}
+
+/* Return the key embedded in @hnode via the optional ops->hs_key
+ * callback, or NULL when the hash registered no key callback. */
+static inline void *
+cfs_hash_key(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+        LASSERT(hs);
+        LASSERT(hnode);
+        LASSERT(CFS_HO(hs));
+
+        if (CFS_HOP(hs, key))
+                return CFS_HOP(hs, key)(hnode);
+
+        return NULL;
+}
+
+/* Returns 1 on a match,
+ * XXX: This would be better if it returned, -1, 0, or 1 for
+ *      <, =, > respectivly.  It could then be used to implement
+ *      a CFS_HASH_SORT feature flags which could keep each hash
+ *      bucket in order.  This would increase insertion times
+ *      but could reduce lookup times for deep chains.  Ideally,
+ *      the rehash should keep chain depth short but if that
+ *      ends up not being the case this would be a nice feature.
+ */
+static inline int
+cfs_hash_compare(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
+{
+        LASSERT(hs);
+        LASSERT(hnode);
+        LASSERT(CFS_HO(hs));
+
+        if (CFS_HOP(hs, compare))
+                return CFS_HOP(hs, compare)(key, hnode);
+
+        /* no compare callback registered; callers testing "> 0" treat
+         * this negative errno as a non-match */
+        return -EOPNOTSUPP;
+}
+
+/* Take a reference on the object behind @hnode via the optional
+ * ops->hs_get callback; returns the object, or NULL if no callback. */
+static inline void *
+cfs_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+        LASSERT(hs);
+        LASSERT(hnode);
+        LASSERT(CFS_HO(hs));
+
+        if (CFS_HOP(hs, get))
+                return CFS_HOP(hs, get)(hnode);
+
+        return NULL;
+}
+
+/* Drop a reference on the object behind @hnode via the optional
+ * ops->hs_put callback; returns the object, or NULL if no callback. */
+static inline void *
+cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+        LASSERT(hs);
+        LASSERT(hnode);
+        LASSERT(CFS_HO(hs));
+
+        if (CFS_HOP(hs, put))
+                return CFS_HOP(hs, put)(hnode);
+
+        return NULL;
+}
+
+/* Invoke the optional ops->hs_exit teardown callback on @hnode;
+ * no-op when the hash registered no exit callback. */
+static inline void
+cfs_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+        LASSERT(hs);
+        LASSERT(hnode);
+        LASSERT(CFS_HO(hs));
+
+        /* C forbids "return expr;" in a void function even when expr has
+         * void type (C99 6.8.6.4), so call the hook as a statement. */
+        if (CFS_HOP(hs, exit))
+                CFS_HOP(hs, exit)(hnode);
+}
+
+/* Validate hnode references the correct key; compiles to a no-op
+ * unless CFS_HASH_DEBUG is set in hs_flags. */
+static inline void
+__cfs_hash_key_validate(cfs_hash_t *hs, void *key,
+                        struct hlist_node *hnode)
+{
+        if (unlikely(hs->hs_flags & CFS_HASH_DEBUG))
+                LASSERT(cfs_hash_compare(hs, key, hnode) > 0);
+}
+
+/* Validate hnode is in the correct bucket; compiles to a no-op
+ * unless CFS_HASH_DEBUG is set in hs_flags. */
+static inline void
+__cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bucket_t *hsb,
+                           struct hlist_node *hnode)
+{
+        unsigned i;
+
+        if (unlikely(hs->hs_flags & CFS_HASH_DEBUG)) {
+                i = cfs_hash_id(hs, cfs_hash_key(hs, hnode), hs->hs_cur_mask);
+                LASSERT(hs->hs_buckets[i] == hsb);
+        }
+}
+
+/* Scan bucket @hsb for the first entry matching @key (ops->hs_compare
+ * returning > 0).  NOTE(review): callers appear to hold hsb_rwlock;
+ * confirm before adding new call sites. */
+static inline struct hlist_node *
+__cfs_hash_bucket_lookup(cfs_hash_t *hs,
+                         cfs_hash_bucket_t *hsb, void *key)
+{
+        struct hlist_node *hnode;
+
+        hlist_for_each(hnode, &hsb->hsb_head)
+                if (cfs_hash_compare(hs, key, hnode) > 0)
+                        return hnode;
+
+        return NULL;
+}
+
+/* Link @hnode at the head of @hsb, bump both per-bucket and table-wide
+ * counters, and take a reference via ops->hs_get. */
+static inline void *
+__cfs_hash_bucket_add(cfs_hash_t *hs,
+                      cfs_hash_bucket_t *hsb,
+                      struct hlist_node *hnode)
+{
+        hlist_add_head(hnode, &(hsb->hsb_head));
+        atomic_inc(&hsb->hsb_count);
+        atomic_inc(&hs->hs_count);
+
+        return cfs_hash_get(hs, hnode);
+}
+
+/* Unlink @hnode from @hsb, decrement both counters (asserting they
+ * were positive), and drop a reference via ops->hs_put. */
+static inline void *
+__cfs_hash_bucket_del(cfs_hash_t *hs,
+                      cfs_hash_bucket_t *hsb,
+                      struct hlist_node *hnode)
+{
+        hlist_del_init(hnode);
+        LASSERT(atomic_read(&hsb->hsb_count) > 0);
+        atomic_dec(&hsb->hsb_count);
+        LASSERT(atomic_read(&hs->hs_count) > 0);
+        atomic_dec(&hs->hs_count);
+
+        return cfs_hash_put(hs, hnode);
+}
+
+/* Hash init/cleanup functions */
+cfs_hash_t *cfs_hash_create(char *name, unsigned int cur_bits,
+ unsigned int max_bits,
+ cfs_hash_ops_t *ops, int flags);
+void cfs_hash_destroy(cfs_hash_t *hs);
+
+/* Hash addition functions */
+void cfs_hash_add(cfs_hash_t *hs, void *key,
+ struct hlist_node *hnode);
+int cfs_hash_add_unique(cfs_hash_t *hs, void *key,
+ struct hlist_node *hnode);
+void *cfs_hash_findadd_unique(cfs_hash_t *hs, void *key,
+ struct hlist_node *hnode);
+
+/* Hash deletion functions */
+void *cfs_hash_del(cfs_hash_t *hs, void *key, struct hlist_node *hnode);
+void *cfs_hash_del_key(cfs_hash_t *hs, void *key);
+
+/* Hash lookup/for_each functions */
+void *cfs_hash_lookup(cfs_hash_t *hs, void *key);
+typedef void (*cfs_hash_for_each_cb_t)(void *obj, void *data);
+void cfs_hash_for_each(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
+void cfs_hash_for_each_safe(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
+void cfs_hash_for_each_empty(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
+void cfs_hash_for_each_key(cfs_hash_t *hs, void *key,
+ cfs_hash_for_each_cb_t, void *data);
+
+/*
+ * Rehash - Theta is calculated to be the average chained
+ * hash depth assuming a perfectly uniform hash funcion.
+ */
+int cfs_hash_rehash(cfs_hash_t *hs, int bits);
+void cfs_hash_rehash_key(cfs_hash_t *hs, void *old_key,
+ void *new_key, struct hlist_node *hnode);
+
+
+/* Theta (load factor) is stored as a fixed-point value with
+ * CFS_HASH_THETA_BITS fractional bits. */
+#define CFS_HASH_THETA_BITS  10
+
+/* Return integer component of theta */
+static inline int __cfs_hash_theta_int(int theta)
+{
+        return (theta >> CFS_HASH_THETA_BITS);
+}
+
+/* Return a fractional value between 0 and 999 */
+static inline int __cfs_hash_theta_frac(int theta)
+{
+        return ((theta * 1000) >> CFS_HASH_THETA_BITS) -
+               (__cfs_hash_theta_int(theta) * 1000);
+}
+
+/* Current load factor: entry count divided by bucket count,
+ * in fixed point (count << THETA_BITS) >> cur_bits. */
+static inline int __cfs_hash_theta(cfs_hash_t *hs)
+{
+        return (atomic_read(&hs->hs_count) <<
+                CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
+}
+
+/* Set the resize thresholds (fixed-point theta); min must be < max. */
+static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
+{
+        LASSERT(min < max);
+        hs->hs_min_theta = min;
+        hs->hs_max_theta = max;
+}
+
+/* Generic debug formatting routines mainly for proc handler */
+int cfs_hash_debug_header(char *str, int size);
+int cfs_hash_debug_str(cfs_hash_t *hs, char *str, int size);
+
+/*
+ * Generic djb2 hash algorithm for character arrays: fold each byte in
+ * as hash = hash * 33 + byte (written here in the classic shift-add
+ * form, which is arithmetically identical), then clip with @mask.
+ */
+static inline unsigned
+cfs_hash_djb2_hash(void *key, size_t size, unsigned mask)
+{
+        const char *bytes = key;
+        unsigned result = 5381;
+        unsigned pos;
+
+        LASSERT(key != NULL);
+
+        for (pos = 0; pos < size; pos++)
+                result = ((result << 5) + result) + bytes[pos];
+
+        return (result & mask);
+}
+
+/*
+ * Generic u32 hash algorithm: Knuth multiplicative hash with the
+ * 32-bit golden-ratio prime, truncated to the bucket range by @mask.
+ */
+static inline unsigned
+cfs_hash_u32_hash(__u32 key, unsigned mask)
+{
+        __u32 product = key * CFS_GOLDEN_RATIO_PRIME_32;
+
+        return (product & mask);
+}
+
+/*
+ * Generic u64 hash algorithm: Knuth multiplicative hash with the
+ * 64-bit golden-ratio prime, truncated to the bucket range by @mask.
+ */
+static inline unsigned
+cfs_hash_u64_hash(__u64 key, unsigned mask)
+{
+        __u64 product = key * CFS_GOLDEN_RATIO_PRIME_64;
+
+        return ((unsigned)product & mask);
+}
+
+/* Iterate @pos over every bucket index of @hs, loading the bucket
+ * pointer into @hsb on each pass.  The bucket must be fetched with the
+ * loop parameter @pos -- the previous hard-coded "i" only worked when
+ * the caller happened to name its index variable "i" and silently read
+ * the wrong variable otherwise.  Arguments are parenthesized against
+ * precedence surprises at expansion sites. */
+#define cfs_hash_for_each_bucket(hs, hsb, pos)           \
+        for (pos = 0;                                    \
+             (pos) <= (hs)->hs_cur_mask &&               \
+             ({ (hsb) = (hs)->hs_buckets[pos]; 1; });    \
+             (pos)++)
+
/* !__LIBCFS__HASH_H__ */
#endif
#ifndef __LIBCFS_PRIM_H__
#define __LIBCFS_PRIM_H__
+#ifndef CFS_EXPORT_SYMBOL
+# define CFS_EXPORT_SYMBOL(s)
+#endif
+
/*
* Schedule
*/
/* !__KERNEL__ */
#endif
+/* Allocate/free *ptr sized by its pointed-to type.  No trailing
+ * semicolon in the expansion: the caller supplies it, and a stray ";"
+ * here would break "if (x) CFS_ALLOC_PTR(p); else ..." constructs. */
+#define CFS_ALLOC_PTR(ptr)  LIBCFS_ALLOC(ptr, sizeof (*(ptr)))
+#define CFS_FREE_PTR(ptr)   LIBCFS_FREE(ptr, sizeof (*(ptr)))
+
/** Compile-time assertion.
* Check an invariant described by a constant expression at compile time by
#include <libcfs/linux/linux-time.h>
+#define CFS_EXPORT_SYMBOL(s) EXPORT_SYMBOL(s)
+
/*
* Pseudo device register
*/
#define __user
#endif
+#ifndef __fls
+#define __fls fls
+#endif
+
#define ll_proc_dointvec(table, write, filp, buffer, lenp, ppos) \
proc_dointvec(table, write, filp, buffer, lenp, ppos);
})
#endif
+/* Fallback definitions for builds whose headers do not supply min/max.
+ * NOTE(review): classic macro hazard -- each argument is evaluated
+ * twice, so min(i++, j) misbehaves; only pass side-effect-free
+ * expressions. */
+#ifndef min
+# define min(x,y) ((x)<(y) ? (x) : (y))
+#endif
+
+#ifndef max
+# define max(x,y) ((x)>(y) ? (x) : (y))
+#endif
+
/* utility libcfs init/fini entries */
#ifdef __WINNT__
extern int libcfs_arch_init(void);
endif
libcfs-all-objs := debug.o nidstrings.o lwt.o module.o tracefile.o watchdog.o \
- libcfs_string.o
+ libcfs_string.o hash.o
libcfs-objs := $(libcfs-linux-objs) $(libcfs-all-objs)
if LIBLUSTRE
noinst_LIBRARIES= libcfs.a
-libcfs_a_SOURCES= posix/posix-debug.c user-prim.c user-lock.c user-tcpip.c user-bitops.c user-mem.c ulinux/ulinux-kernelcomm.c
+libcfs_a_SOURCES= posix/posix-debug.c user-prim.c user-lock.c user-tcpip.c \
+ user-bitops.c user-mem.c hash.c ulinux/ulinux-kernelcomm.c
libcfs_a_CPPFLAGS = $(LLCPPFLAGS)
libcfs_a_CFLAGS = $(LLCFLAGS)
endif
darwin/darwin-debug.c darwin/darwin-proc.c \
darwin/darwin-tracefile.c darwin/darwin-module.c \
posix/posix-debug.c module.c tracefile.c nidstrings.c watchdog.c \
- ulinux/ulinux-kernelcomm.c
+ ulinux/ulinux-kernelcomm.c hash.c
libcfs_CFLAGS := $(EXTRA_KCFLAGS)
libcfs_LDFLAGS := $(EXTRA_KLDFLAGS)
--- /dev/null
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/hash.c
+ *
+ * Implement a hash class for hash process in lustre system.
+ *
+ * Author: YuZhangyong <yzy@clusterfs.com>
+ *
+ * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
+ * - Simplified API and improved documentation
+ * - Added per-hash feature flags:
+ * * CFS_HASH_DEBUG additional validation
+ * * CFS_HASH_REHASH dynamic rehashing
+ * - Added per-hash statistics
+ * - General performance enhancements
+ *
+ * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
+ * - move all stuff to libcfs
+ * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
+ * - ignore hs_rwlock if without CFS_HASH_REHASH setting
+ * - buckets are allocated one by one(intead of contiguous memory),
+ * to avoid unnecessary cacheline conflict
+ */
+
+#include <libcfs/libcfs.h>
+
+/* Table-wide lock helpers.  The hs_rwlock only guards against resizing,
+ * so all four are no-ops unless CFS_HASH_REHASH is set in hs_flags. */
+static void
+cfs_hash_rlock(cfs_hash_t *hs)
+{
+        if (!(hs->hs_flags & CFS_HASH_REHASH))
+                return;
+
+        read_lock(&hs->hs_rwlock);
+}
+
+static void
+cfs_hash_runlock(cfs_hash_t *hs)
+{
+        if (!(hs->hs_flags & CFS_HASH_REHASH))
+                return;
+
+        read_unlock(&hs->hs_rwlock);
+}
+
+static void
+cfs_hash_wlock(cfs_hash_t *hs)
+{
+        if (!(hs->hs_flags & CFS_HASH_REHASH))
+                return;
+
+        write_lock(&hs->hs_rwlock);
+}
+
+static void
+cfs_hash_wunlock(cfs_hash_t *hs)
+{
+        if (!(hs->hs_flags & CFS_HASH_REHASH))
+                return;
+
+        write_unlock(&hs->hs_rwlock);
+}
+
+/**
+ * Initialize new libcfs hash, where:
+ * @name     - Descriptive hash name; truncated to CFS_MAX_HASH_NAME - 1
+ * @cur_bits - Initial hash table size, in bits
+ * @max_bits - Maximum allowed hash table resize, in bits
+ * @ops      - Registered hash table operations
+ * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
+ *           - CFS_HASH_SORT enable chained hash sort
+ *
+ * Returns the new hash, or NULL on allocation failure.
+ */
+cfs_hash_t *
+cfs_hash_create(char *name, unsigned int cur_bits,
+                unsigned int max_bits, cfs_hash_ops_t *ops, int flags)
+{
+        cfs_hash_t *hs;
+        int         i;
+        ENTRY;
+
+        LASSERT(name != NULL);
+        LASSERT(ops != NULL);
+
+        LASSERT(cur_bits > 0);
+        LASSERT(max_bits >= cur_bits);
+        LASSERT(max_bits < 31);
+        /* a fixed-size table is allowed only when it can never need to
+         * grow; otherwise CFS_HASH_REHASH must be requested */
+        LASSERT(cur_bits == max_bits || (flags & CFS_HASH_REHASH) != 0);
+
+        CFS_ALLOC_PTR(hs);
+        if (!hs)
+                RETURN(NULL);
+
+        strncpy(hs->hs_name, name, sizeof(hs->hs_name));
+        hs->hs_name[sizeof(hs->hs_name) - 1] = '\0';
+        atomic_set(&hs->hs_rehash_count, 0);
+        atomic_set(&hs->hs_count, 0);
+        rwlock_init(&hs->hs_rwlock);
+        hs->hs_cur_bits = cur_bits;
+        hs->hs_cur_mask = (1 << cur_bits) - 1;
+        hs->hs_min_bits = cur_bits;
+        hs->hs_max_bits = max_bits;
+        hs->hs_ops      = ops;
+        hs->hs_flags    = flags;
+
+        /* theta * 1000.  (The old shift-based pre-initialization of
+         * hs_min_theta/hs_max_theta was dead code: this call always
+         * overwrote it.)
+         * XXX: need to fixup cfs_hash_rehash_bits() before this can be
+         * anything other than 0.5 and 2.0 */
+        __cfs_hash_set_theta(hs, 500, 2000);
+
+        LIBCFS_ALLOC(hs->hs_buckets,
+                     sizeof(*hs->hs_buckets) << hs->hs_cur_bits);
+        if (hs->hs_buckets == NULL) {
+                CFS_FREE_PTR(hs);
+                RETURN(NULL);
+        }
+
+        /* buckets are allocated one by one (instead of one contiguous
+         * chunk) to avoid unnecessary cacheline conflicts */
+        for (i = 0; i <= hs->hs_cur_mask; i++) {
+                CFS_ALLOC_PTR(hs->hs_buckets[i]);
+                if (hs->hs_buckets[i] == NULL) {
+                        /* destroy frees the buckets allocated so far,
+                         * the bucket array, and hs itself */
+                        cfs_hash_destroy(hs);
+                        return NULL;
+                }
+
+                CFS_INIT_HLIST_HEAD(&hs->hs_buckets[i]->hsb_head);
+                rwlock_init(&hs->hs_buckets[i]->hsb_rwlock);
+                atomic_set(&hs->hs_buckets[i]->hsb_count, 0);
+        }
+
+        return hs;
+}
+CFS_EXPORT_SYMBOL(cfs_hash_create);
+
+/**
+ * Cleanup libcfs hash @hs: unlink every remaining entry (invoking the
+ * optional ops->hs_put and ops->hs_exit callbacks on each), then free
+ * all buckets, the bucket array, and the hash itself.
+ */
+void
+cfs_hash_destroy(cfs_hash_t *hs)
+{
+        cfs_hash_bucket_t    *hsb;
+        struct hlist_node    *hnode;
+        struct hlist_node    *pos;
+        int                   i;
+        ENTRY;
+
+        LASSERT(hs != NULL);
+
+        cfs_hash_wlock(hs);
+
+        cfs_hash_for_each_bucket(hs, hsb, i) {
+                /* NULL buckets occur when destroy is invoked from a
+                 * partially-failed cfs_hash_create() */
+                if (hsb == NULL)
+                        continue;
+
+                write_lock(&hsb->hsb_rwlock);
+                hlist_for_each_safe(hnode, pos, &(hsb->hsb_head)) {
+                        __cfs_hash_bucket_validate(hs, hsb, hnode);
+                        __cfs_hash_bucket_del(hs, hsb, hnode);
+                        cfs_hash_exit(hs, hnode);
+                }
+
+                LASSERT(hlist_empty(&(hsb->hsb_head)));
+                LASSERT(atomic_read(&hsb->hsb_count) == 0);
+                write_unlock(&hsb->hsb_rwlock);
+                CFS_FREE_PTR(hsb);
+        }
+
+        LASSERT(atomic_read(&hs->hs_count) == 0);
+        cfs_hash_wunlock(hs);
+
+        LIBCFS_FREE(hs->hs_buckets,
+                    sizeof(*hs->hs_buckets) << hs->hs_cur_bits);
+        CFS_FREE_PTR(hs);
+        EXIT;
+}
+CFS_EXPORT_SYMBOL(cfs_hash_destroy);
+
+/* Decide whether @hs needs resizing: returns the new bit count to
+ * rehash to (grow when theta exceeds hs_max_theta, shrink when it
+ * drops below hs_min_theta), or 0 when no resize is needed or
+ * CFS_HASH_REHASH is not enabled.  Grows/shrinks by one bit at a
+ * time, clamped to [hs_min_bits, hs_max_bits]. */
+static inline unsigned int
+cfs_hash_rehash_bits(cfs_hash_t *hs)
+{
+        if (!(hs->hs_flags & CFS_HASH_REHASH))
+                return 0;
+
+        /* XXX: need to handle case with max_theta != 2.0
+         *      and the case with min_theta != 0.5 */
+        if ((hs->hs_cur_bits < hs->hs_max_bits) &&
+            (__cfs_hash_theta(hs) > hs->hs_max_theta))
+                return hs->hs_cur_bits + 1;
+
+        if ((hs->hs_cur_bits > hs->hs_min_bits) &&
+            (__cfs_hash_theta(hs) < hs->hs_min_theta))
+                return hs->hs_cur_bits - 1;
+
+        return 0;
+}
+
+/**
+ * Add item @hnode to libcfs hash @hs using @key.  The registered
+ * ops->hs_get function will be called when the item is added.
+ */
+void
+cfs_hash_add(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
+{
+        cfs_hash_bucket_t    *hsb;
+        int                   bits;
+        unsigned              i;
+        ENTRY;
+
+        __cfs_hash_key_validate(hs, key, hnode);
+
+        cfs_hash_rlock(hs);
+        i = cfs_hash_id(hs, key, hs->hs_cur_mask);
+        hsb = hs->hs_buckets[i];
+        LASSERT(i <= hs->hs_cur_mask);
+        /* the node must not already be hashed anywhere */
+        LASSERT(hlist_unhashed(hnode));
+
+        write_lock(&hsb->hsb_rwlock);
+        __cfs_hash_bucket_add(hs, hsb, hnode);
+        write_unlock(&hsb->hsb_rwlock);
+
+        /* decide on a resize while still under the table read lock,
+         * but perform it only after dropping the lock, since
+         * cfs_hash_rehash() takes the table lock for writing */
+        bits = cfs_hash_rehash_bits(hs);
+        cfs_hash_runlock(hs);
+        if (bits)
+                cfs_hash_rehash(hs, bits);
+
+        EXIT;
+}
+CFS_EXPORT_SYMBOL(cfs_hash_add);
+
+/* Core of the unique-add operations: under the bucket write lock,
+ * either find an existing entry for @key (reference it and return it)
+ * or insert @hnode and return @hnode.  The caller distinguishes the
+ * two cases by comparing the result with @hnode. */
+static struct hlist_node *
+cfs_hash_findadd_unique_hnode(cfs_hash_t *hs, void *key,
+                              struct hlist_node *hnode)
+{
+        int                   bits = 0;
+        struct hlist_node    *ehnode;
+        cfs_hash_bucket_t    *hsb;
+        unsigned              i;
+        ENTRY;
+
+        __cfs_hash_key_validate(hs, key, hnode);
+
+        cfs_hash_rlock(hs);
+        i = cfs_hash_id(hs, key, hs->hs_cur_mask);
+        hsb = hs->hs_buckets[i];
+        LASSERT(i <= hs->hs_cur_mask);
+        LASSERT(hlist_unhashed(hnode));
+
+        write_lock(&hsb->hsb_rwlock);
+        ehnode = __cfs_hash_bucket_lookup(hs, hsb, key);
+        if (ehnode) {
+                /* key already present: reference the existing entry */
+                cfs_hash_get(hs, ehnode);
+        } else {
+                __cfs_hash_bucket_add(hs, hsb, hnode);
+                ehnode = hnode;
+                /* only an actual insertion can trigger a resize */
+                bits = cfs_hash_rehash_bits(hs);
+        }
+        write_unlock(&hsb->hsb_rwlock);
+        cfs_hash_runlock(hs);
+        if (bits)
+                cfs_hash_rehash(hs, bits);
+
+        RETURN(ehnode);
+}
+
+/**
+ * Add item @hnode to libcfs hash @hs using @key.  The registered
+ * ops->hs_get function will be called if the item was added.
+ * Returns 0 on success or -EALREADY on key collisions.
+ */
+int
+cfs_hash_add_unique(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
+{
+        struct hlist_node    *ehnode;
+        ENTRY;
+
+        ehnode = cfs_hash_findadd_unique_hnode(hs, key, hnode);
+        if (ehnode != hnode) {
+                /* drop the reference taken on the colliding entry */
+                cfs_hash_put(hs, ehnode);
+                RETURN(-EALREADY);
+        }
+        RETURN(0);
+}
+CFS_EXPORT_SYMBOL(cfs_hash_add_unique);
+
+/**
+ * Add item @hnode to libcfs hash @hs using @key.  If this @key
+ * already exists in the hash then ops->hs_get will be called on the
+ * conflicting entry and that entry will be returned to the caller.
+ * Otherwise ops->hs_get is called on the item which was added.
+ */
+void *
+cfs_hash_findadd_unique(cfs_hash_t *hs, void *key,
+                        struct hlist_node *hnode)
+{
+        struct hlist_node    *ehnode;
+        void                 *obj;
+        ENTRY;
+
+        ehnode = cfs_hash_findadd_unique_hnode(hs, key, hnode);
+        /* take the reference handed back to the caller, then release
+         * the one held by findadd itself */
+        obj = cfs_hash_get(hs, ehnode);
+        cfs_hash_put(hs, ehnode);
+        RETURN(obj);
+}
+CFS_EXPORT_SYMBOL(cfs_hash_findadd_unique);
+
+/**
+ * Delete item @hnode from the libcfs hash @hs using @key.  The @key
+ * is required to ensure the correct hash bucket is locked since there
+ * is no direct linkage from the item to the bucket.  The object
+ * removed from the hash will be returned and ops->hs_put is called
+ * on the removed object.
+ */
+void *
+cfs_hash_del(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
+{
+        cfs_hash_bucket_t    *hsb;
+        void                 *obj;
+        unsigned              i;
+        ENTRY;
+
+        __cfs_hash_key_validate(hs, key, hnode);
+
+        cfs_hash_rlock(hs);
+        i = cfs_hash_id(hs, key, hs->hs_cur_mask);
+        hsb = hs->hs_buckets[i];
+        LASSERT(i <= hs->hs_cur_mask);
+        /* the node must currently be hashed */
+        LASSERT(!hlist_unhashed(hnode));
+
+        write_lock(&hsb->hsb_rwlock);
+        obj = __cfs_hash_bucket_del(hs, hsb, hnode);
+        write_unlock(&hsb->hsb_rwlock);
+        cfs_hash_runlock(hs);
+
+        RETURN(obj);
+}
+CFS_EXPORT_SYMBOL(cfs_hash_del);
+
+/**
+ * Delete item given @key in libcfs hash @hs.  The first @key found in
+ * the hash will be removed; if the key exists multiple times in the
+ * hash @hs this function must be called once per key.  The removed
+ * object will be returned (NULL when the key was not found) and
+ * ops->hs_put is called on the removed object.
+ */
+void *
+cfs_hash_del_key(cfs_hash_t *hs, void *key)
+{
+        void                 *obj = NULL;
+        struct hlist_node    *hnode;
+        cfs_hash_bucket_t    *hsb;
+        unsigned              i;
+        ENTRY;
+
+        cfs_hash_rlock(hs);
+        i = cfs_hash_id(hs, key, hs->hs_cur_mask);
+        hsb = hs->hs_buckets[i];
+        LASSERT(i <= hs->hs_cur_mask);
+
+        write_lock(&hsb->hsb_rwlock);
+        hnode = __cfs_hash_bucket_lookup(hs, hsb, key);
+        if (hnode)
+                obj = __cfs_hash_bucket_del(hs, hsb, hnode);
+
+        write_unlock(&hsb->hsb_rwlock);
+        cfs_hash_runlock(hs);
+
+        RETURN(obj);
+}
+CFS_EXPORT_SYMBOL(cfs_hash_del_key);
+
+/**
+ * Lookup an item using @key in the libcfs hash @hs and return it.
+ * If the @key is found in the hash ops->hs_get is called and the
+ * matching object is returned.  It is the caller's responsibility
+ * to call the counterpart ops->hs_put via cfs_hash_put() when
+ * finished with the object.  If the @key was not found in the hash
+ * @hs NULL is returned.
+ */
+void *
+cfs_hash_lookup(cfs_hash_t *hs, void *key)
+{
+        void                 *obj = NULL;
+        struct hlist_node    *hnode;
+        cfs_hash_bucket_t    *hsb;
+        unsigned              i;
+        ENTRY;
+
+        cfs_hash_rlock(hs);
+        i = cfs_hash_id(hs, key, hs->hs_cur_mask);
+        hsb = hs->hs_buckets[i];
+        LASSERT(i <= hs->hs_cur_mask);
+
+        /* read lock suffices: lookup does not modify the chain */
+        read_lock(&hsb->hsb_rwlock);
+        hnode = __cfs_hash_bucket_lookup(hs, hsb, key);
+        if (hnode)
+                obj = cfs_hash_get(hs, hnode);
+
+        read_unlock(&hsb->hsb_rwlock);
+        cfs_hash_runlock(hs);
+
+        RETURN(obj);
+}
+CFS_EXPORT_SYMBOL(cfs_hash_lookup);
+
+/**
+ * For each item in the libcfs hash @hs call the passed callback @func
+ * and pass to it as an argument each hash item and the private @data.
+ * Before each callback ops->hs_get will be called, and after each
+ * callback ops->hs_put will be called.  Finally, during the callback
+ * the bucket lock is held so the callback must never sleep.
+ */
+void
+cfs_hash_for_each(cfs_hash_t *hs,
+                  cfs_hash_for_each_cb_t func, void *data)
+{
+        struct hlist_node    *hnode;
+        cfs_hash_bucket_t    *hsb;
+        void                 *obj;
+        int                   i;
+        ENTRY;
+
+        cfs_hash_rlock(hs);
+        cfs_hash_for_each_bucket(hs, hsb, i) {
+                read_lock(&hsb->hsb_rwlock);
+                hlist_for_each(hnode, &(hsb->hsb_head)) {
+                        __cfs_hash_bucket_validate(hs, hsb, hnode);
+                        obj = cfs_hash_get(hs, hnode);
+                        func(obj, data);
+                        (void)cfs_hash_put(hs, hnode);
+                }
+                read_unlock(&hsb->hsb_rwlock);
+        }
+        cfs_hash_runlock(hs);
+
+        EXIT;
+}
+CFS_EXPORT_SYMBOL(cfs_hash_for_each);
+
+/**
+ * For each item in the libcfs hash @hs call the passed callback @func
+ * and pass to it as an argument each hash item and the private @data.
+ * Before each callback ops->hs_get will be called, and after each
+ * callback ops->hs_put will be called.  During the callback the
+ * bucket lock will not be held, which allows the current item
+ * to be removed from the hash during the callback.  However, care
+ * should be taken to prevent other callers from operating on the
+ * hash concurrently or list corruption may occur.
+ */
+void
+cfs_hash_for_each_safe(cfs_hash_t *hs,
+                       cfs_hash_for_each_cb_t func, void *data)
+{
+        struct hlist_node    *hnode;
+        struct hlist_node    *pos;
+        cfs_hash_bucket_t    *hsb;
+        void                 *obj;
+        int                   i;
+        ENTRY;
+
+        cfs_hash_rlock(hs);
+        cfs_hash_for_each_bucket(hs, hsb, i) {
+                read_lock(&hsb->hsb_rwlock);
+                hlist_for_each_safe(hnode, pos, &(hsb->hsb_head)) {
+                        __cfs_hash_bucket_validate(hs, hsb, hnode);
+                        obj = cfs_hash_get(hs, hnode);
+                        /* drop the bucket lock across the callback so
+                         * @func may remove the current node; "pos" was
+                         * already saved by hlist_for_each_safe.
+                         * NOTE(review): if a concurrent writer frees
+                         * "pos" in this window the list walk is unsafe
+                         * -- see the header comment's caveat. */
+                        read_unlock(&hsb->hsb_rwlock);
+                        func(obj, data);
+                        read_lock(&hsb->hsb_rwlock);
+                        (void)cfs_hash_put(hs, hnode);
+                }
+                read_unlock(&hsb->hsb_rwlock);
+        }
+        cfs_hash_runlock(hs);
+        EXIT;
+}
+CFS_EXPORT_SYMBOL(cfs_hash_for_each_safe);
+
+/**
+ * For each hash bucket in the libcfs hash @hs call the passed callback
+ * @func until all the hash buckets are empty.  The passed callback @func
+ * or the previously registered callback hs->hs_put must remove the item
+ * from the hash.  You may either use the cfs_hash_del() or hlist_del()
+ * functions.  No rwlocks will be held during the callback @func so it is
+ * safe to sleep if needed.  This function will not terminate until the
+ * hash is empty.  Note it is still possible to concurrently add new
+ * items in to the hash.  It is the caller's responsibility to ensure
+ * the required locking is in place to prevent concurrent insertions.
+ */
+void
+cfs_hash_for_each_empty(cfs_hash_t *hs,
+                        cfs_hash_for_each_cb_t func, void *data)
+{
+        struct hlist_node    *hnode;
+        cfs_hash_bucket_t    *hsb;
+        void                 *obj;
+        int                   i;
+        ENTRY;
+
+restart:
+        /* restart the whole scan after every callback: all locks are
+         * dropped for @func, so cached iteration state would be stale */
+        cfs_hash_rlock(hs);
+        cfs_hash_for_each_bucket(hs, hsb, i) {
+                write_lock(&hsb->hsb_rwlock);
+                while (!hlist_empty(&hsb->hsb_head)) {
+                        hnode =  hsb->hsb_head.first;
+                        __cfs_hash_bucket_validate(hs, hsb, hnode);
+                        obj = cfs_hash_get(hs, hnode);
+                        write_unlock(&hsb->hsb_rwlock);
+                        cfs_hash_runlock(hs);
+                        func(obj, data);
+                        (void)cfs_hash_put(hs, hnode);
+                        goto restart;
+                }
+                write_unlock(&hsb->hsb_rwlock);
+        }
+        cfs_hash_runlock(hs);
+        EXIT;
+}
+CFS_EXPORT_SYMBOL(cfs_hash_for_each_empty);
+
+/*
+ * For each item in the libcfs hash @hs which matches the @key call
+ * the passed callback @func and pass to it as an argument each hash
+ * item and the private @data.  Before each callback ops->hs_get will
+ * be called, and after each callback ops->hs_put will be called.
+ * Finally, during the callback the bucket lock is held so the
+ * callback must never sleep.
+ */
+void
+cfs_hash_for_each_key(cfs_hash_t *hs, void *key,
+                      cfs_hash_for_each_cb_t func, void *data)
+{
+        struct hlist_node    *hnode;
+        cfs_hash_bucket_t    *hsb;
+        unsigned              i;
+        ENTRY;
+
+        cfs_hash_rlock(hs);
+        /* all entries sharing @key hash to a single bucket, so only
+         * that one chain needs walking */
+        i = cfs_hash_id(hs, key, hs->hs_cur_mask);
+        hsb = hs->hs_buckets[i];
+        LASSERT(i <= hs->hs_cur_mask);
+
+        read_lock(&hsb->hsb_rwlock);
+        hlist_for_each(hnode, &(hsb->hsb_head)) {
+                __cfs_hash_bucket_validate(hs, hsb, hnode);
+
+                if (!cfs_hash_compare(hs, key, hnode))
+                        continue;
+
+                func(cfs_hash_get(hs, hnode), data);
+                (void)cfs_hash_put(hs, hnode);
+        }
+
+        read_unlock(&hsb->hsb_rwlock);
+        cfs_hash_runlock(hs);
+
+        EXIT;
+}
+CFS_EXPORT_SYMBOL(cfs_hash_for_each_key);
+
+/**
+ * Rehash the libcfs hash @hs to the given @bits.  This can be used
+ * to grow the hash size when excessive chaining is detected, or to
+ * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
+ * flag is set in @hs the libcfs hash may be dynamically rehashed
+ * during addition or removal if the hash's theta value exceeds
+ * either the hs->hs_min_theta or hs->max_theta values.  By default
+ * these values are tuned to keep the chained hash depth small, and
+ * this approach assumes a reasonably uniform hashing function.  The
+ * theta thresholds for @hs are tunable via cfs_hash_set_theta().
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, or -EALREADY
+ * when a concurrent rehash already brought theta back in range.
+ */
+int
+cfs_hash_rehash(cfs_hash_t *hs, int bits)
+{
+        struct hlist_node     *hnode;
+        struct hlist_node     *pos;
+        cfs_hash_bucket_t    **old_buckets;
+        cfs_hash_bucket_t    **rehash_buckets;
+        cfs_hash_bucket_t     *hs_hsb;
+        cfs_hash_bucket_t     *rehash_hsb;
+        int                    i;
+        int                    theta;
+        int                    old_mask;
+        int                    old_bits;
+        int                    new_mask = (1 << bits) - 1;
+        int                    rc = 0;
+        void                  *key;
+        ENTRY;
+
+        LASSERT(!in_interrupt());
+        LASSERT(new_mask > 0);
+        LASSERT((hs->hs_flags & CFS_HASH_REHASH) != 0);
+
+        /* allocate the new bucket set before taking any locks */
+        LIBCFS_ALLOC(rehash_buckets, sizeof(*rehash_buckets) << bits);
+        if (!rehash_buckets)
+                RETURN(-ENOMEM);
+
+        for (i = 0; i <= new_mask; i++) {
+                CFS_ALLOC_PTR(rehash_buckets[i]);
+                if (rehash_buckets[i] == NULL)
+                        /* "i" marks how many buckets were allocated;
+                         * the free loop below unwinds exactly those */
+                        GOTO(free, rc = -ENOMEM);
+
+                CFS_INIT_HLIST_HEAD(&rehash_buckets[i]->hsb_head);
+                rwlock_init(&rehash_buckets[i]->hsb_rwlock);
+                atomic_set(&rehash_buckets[i]->hsb_count, 0);
+        }
+
+        cfs_hash_wlock(hs);
+
+        /*
+         * Early return for multiple concurrent racing callers,
+         * ensure we only trigger the rehash if it is still needed.
+         */
+        theta = __cfs_hash_theta(hs);
+        if ((theta >= hs->hs_min_theta) && (theta <= hs->hs_max_theta)) {
+                cfs_hash_wunlock(hs);
+                GOTO(free, rc = -EALREADY);
+        }
+
+        old_bits = hs->hs_cur_bits;
+        old_buckets = hs->hs_buckets;
+        old_mask = (1 << old_bits) - 1;
+
+        hs->hs_cur_bits = bits;
+        hs->hs_cur_mask = (1 << bits) - 1;
+        hs->hs_buckets = rehash_buckets;
+        atomic_inc(&hs->hs_rehash_count);
+
+        for (i = 0; i <= old_mask; i++) {
+                hs_hsb = old_buckets[i];
+
+                write_lock(&hs_hsb->hsb_rwlock);
+                hlist_for_each_safe(hnode, pos, &(hs_hsb->hsb_head)) {
+                        key = cfs_hash_key(hs, hnode);
+                        LASSERT(key);
+
+                        /*
+                         * Validate hnode is in the correct bucket.
+                         */
+                        if (unlikely(hs->hs_flags & CFS_HASH_DEBUG))
+                                LASSERT(cfs_hash_id(hs, key, old_mask) == i);
+
+                        /*
+                         * Delete from old hash bucket.
+                         */
+                        hlist_del(hnode);
+                        LASSERT(atomic_read(&hs_hsb->hsb_count) > 0);
+                        atomic_dec(&hs_hsb->hsb_count);
+
+                        /*
+                         * Add to rehash bucket, ops->hs_key must be defined.
+                         */
+                        rehash_hsb = rehash_buckets[cfs_hash_id(hs, key,
+                                                                new_mask)];
+                        hlist_add_head(hnode, &(rehash_hsb->hsb_head));
+                        atomic_inc(&rehash_hsb->hsb_count);
+                }
+
+                LASSERT(hlist_empty(&(hs_hsb->hsb_head)));
+                LASSERT(atomic_read(&hs_hsb->hsb_count) == 0);
+                write_unlock(&hs_hsb->hsb_rwlock);
+        }
+
+        cfs_hash_wunlock(hs);
+        /* success: repoint the cleanup variables at the now-empty OLD
+         * bucket set so the shared "free" path below releases it */
+        rehash_buckets = old_buckets;
+        i = (1 << old_bits);
+        bits = old_bits;
+ free:
+        while (--i >= 0)
+                CFS_FREE_PTR(rehash_buckets[i]);
+        LIBCFS_FREE(rehash_buckets, sizeof(*rehash_buckets) << bits);
+        RETURN(rc);
+}
+CFS_EXPORT_SYMBOL(cfs_hash_rehash);
+
+/**
+ * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
+ * @old_key must be provided to locate the object's previous location
+ * in the hash, and the @new_key will be used to reinsert the object.
+ * Use this function instead of a cfs_hash_add() + cfs_hash_del()
+ * combo when it is critical that there is no window in time where the
+ * object is missing from the hash.  When an object is being rehashed
+ * the registered cfs_hash_get() and cfs_hash_put() functions will
+ * not be called.
+ */
+void cfs_hash_rehash_key(cfs_hash_t *hs, void *old_key, void *new_key,
+                         struct hlist_node *hnode)
+{
+        cfs_hash_bucket_t     *old_hsb;
+        cfs_hash_bucket_t     *new_hsb;
+        unsigned               i;
+        unsigned               j;
+        ENTRY;
+
+        __cfs_hash_key_validate(hs, new_key, hnode);
+        LASSERT(!hlist_unhashed(hnode));
+
+        cfs_hash_rlock(hs);
+
+        i = cfs_hash_id(hs, old_key, hs->hs_cur_mask);
+        old_hsb = hs->hs_buckets[i];
+        LASSERT(i <= hs->hs_cur_mask);
+
+        j = cfs_hash_id(hs, new_key, hs->hs_cur_mask);
+        new_hsb = hs->hs_buckets[j];
+        LASSERT(j <= hs->hs_cur_mask);
+
+        /* always take the two bucket locks in index order to avoid an
+         * AB-BA deadlock against a concurrent mover going i <-> j */
+        if (i < j) { /* write_lock ordering */
+                write_lock(&old_hsb->hsb_rwlock);
+                write_lock(&new_hsb->hsb_rwlock);
+        } else if (i > j) {
+                write_lock(&new_hsb->hsb_rwlock);
+                write_lock(&old_hsb->hsb_rwlock);
+        } else { /* same bucket: nothing to migrate */
+                /* Must release via cfs_hash_runlock(): a bare
+                 * read_unlock() here unbalanced the lock whenever
+                 * CFS_HASH_REHASH was unset and cfs_hash_rlock()
+                 * had never taken it. */
+                cfs_hash_runlock(hs);
+                EXIT;
+                return;
+        }
+
+        /*
+         * Migrate item between hash buckets without calling
+         * the cfs_hash_get() and cfs_hash_put() callback functions.
+         */
+        hlist_del(hnode);
+        LASSERT(atomic_read(&old_hsb->hsb_count) > 0);
+        atomic_dec(&old_hsb->hsb_count);
+        hlist_add_head(hnode, &(new_hsb->hsb_head));
+        atomic_inc(&new_hsb->hsb_count);
+
+        write_unlock(&new_hsb->hsb_rwlock);
+        write_unlock(&old_hsb->hsb_rwlock);
+        cfs_hash_runlock(hs);
+
+        EXIT;
+}
+CFS_EXPORT_SYMBOL(cfs_hash_rehash_key);
+
+/**
+ * Format the column header line that matches the per-hash rows emitted
+ * by cfs_hash_debug_str() into @str (at most @size bytes, including the
+ * terminating NUL). Returns the snprintf() result, i.e. the number of
+ * characters that would have been written had @size been large enough.
+ */
+int cfs_hash_debug_header(char *str, int size)
+{
+        return snprintf(str, size,
+                 "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%6s%s\n", CFS_MAX_HASH_NAME,
+                 "name", "cur", "min", "max", "theta", "t-min", "t-max",
+                 "flags", "rehash", "count", " distribution");
+}
+CFS_EXPORT_SYMBOL(cfs_hash_debug_header);
+
+/**
+ * Format one row of debug statistics for hash @hs into @str (at most
+ * @size bytes): name, current/min/max table sizes, current/min/max
+ * theta, flags, rehash count, entry count and a chain-depth histogram.
+ * Returns the number of characters accumulated across the snprintf()
+ * calls. Takes the hash read lock for the duration.
+ *
+ * NOTE(review): each call passes "size - c" as the snprintf() size; if
+ * the caller's buffer is smaller than one full row, c can exceed size
+ * and "size - c" goes negative (huge when converted to size_t) — TODO
+ * confirm callers always supply a page-sized buffer.
+ */
+int cfs_hash_debug_str(cfs_hash_t *hs, char *str, int size)
+{
+        cfs_hash_bucket_t *hsb;
+        int                theta;
+        int                i;
+        int                c = 0;
+        int                dist[8] = { 0, };
+
+        if (str == NULL || size == 0)
+                return 0;
+
+        cfs_hash_rlock(hs);
+        theta = __cfs_hash_theta(hs);
+
+        c += snprintf(str + c, size - c, "%-*s ",
+                      CFS_MAX_HASH_NAME, hs->hs_name);
+        c += snprintf(str + c, size - c, "%5d ", 1 << hs->hs_cur_bits);
+        c += snprintf(str + c, size - c, "%5d ", 1 << hs->hs_min_bits);
+        c += snprintf(str + c, size - c, "%5d ", 1 << hs->hs_max_bits);
+        /* theta is fixed-point (CFS_HASH_THETA_BITS); print as int.frac */
+        c += snprintf(str + c, size - c, "%d.%03d ",
+                      __cfs_hash_theta_int(theta),
+                      __cfs_hash_theta_frac(theta));
+        c += snprintf(str + c, size - c, "%d.%03d ",
+                      __cfs_hash_theta_int(hs->hs_min_theta),
+                      __cfs_hash_theta_frac(hs->hs_min_theta));
+        c += snprintf(str + c, size - c, "%d.%03d ",
+                      __cfs_hash_theta_int(hs->hs_max_theta),
+                      __cfs_hash_theta_frac(hs->hs_max_theta));
+        c += snprintf(str + c, size - c, " 0x%02x ", hs->hs_flags);
+        c += snprintf(str + c, size - c, "%6d ",
+                      atomic_read(&hs->hs_rehash_count));
+        c += snprintf(str + c, size - c, "%5d ",
+                      atomic_read(&hs->hs_count));
+
+        /*
+         * The distribution is a summary of the chained hash depth in
+         * each of the libcfs hash buckets. Each buckets hsb_count is
+         * divided by the hash theta value and used to generate a
+         * histogram of the hash distribution. A uniform hash will
+         * result in all hash buckets being close to the average thus
+         * only the first few entries in the histogram will be non-zero.
+         * If you hash function results in a non-uniform hash the will
+         * be observable by outlier bucks in the distribution histogram.
+         *
+         * Uniform hash distribution:      128/128/0/0/0/0/0/0
+         * Non-Uniform hash distribution:  128/125/0/0/0/0/2/1
+         */
+        cfs_hash_for_each_bucket(hs, hsb, i)
+                dist[min(__fls(atomic_read(&hsb->hsb_count)/max(theta,1)),7)]++;
+
+        for (i = 0; i < 8; i++)
+                c += snprintf(str + c, size - c, "%d%c",  dist[i],
+                              (i == 7) ? '\n' : '/');
+
+        cfs_hash_runlock(hs);
+
+        return c;
+}
+CFS_EXPORT_SYMBOL(cfs_hash_debug_str);
lustre_fsfilt.h lustre_ha.h lustre_handles.h lustre_import.h \
lustre_lib.h lustre_sec.h lustre_lite.h lustre_log.h lustre_mds.h \
lustre_mdc.h lustre_net.h lustre_quota.h lustre_ucache.h lvfs.h \
- class_hash.h obd_cache.h obd_class.h obd.h obd_lov.h \
+ obd_cache.h obd_class.h obd.h obd_lov.h \
obd_ost.h obd_support.h lustre_ver.h lu_object.h lu_time.h \
md_object.h dt_object.h lustre_param.h lustre_mdt.h \
lustre_fid.h lustre_fld.h lustre_req_layout.h lustre_capa.h \
+++ /dev/null
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __CLASS_HASH_H
-#define __CLASS_HASH_H
-
-#include <lustre_lib.h>
-
-struct lustre_hash_ops;
-
-typedef struct lustre_hash_bucket {
- struct hlist_head lhb_head; /* entries list */
- atomic_t lhb_count; /* current entries */
- rwlock_t lhb_rwlock; /* lustre_hash_bucket */
-} lustre_hash_bucket_t;
-
-#define LUSTRE_MAX_HASH_NAME 16
-
-typedef struct lustre_hash {
- int lh_cur_bits; /* current hash bits */
- int lh_cur_mask; /* current hash mask */
- int lh_min_bits; /* min hash bits */
- int lh_max_bits; /* max hash bits */
- int lh_min_theta; /* resize min threshold */
- int lh_max_theta; /* resize max threshold */
- int lh_flags; /* hash flags */
- atomic_t lh_count; /* current entries */
- atomic_t lh_rehash_count;/* resize count */
- struct lustre_hash_bucket **lh_buckets; /* hash buckets */
- struct lustre_hash_ops *lh_ops; /* hash operations */
- rwlock_t lh_rwlock; /* lustre_hash */
- char lh_name[LUSTRE_MAX_HASH_NAME];
-} lustre_hash_t;
-
-typedef struct lustre_hash_ops {
- unsigned (*lh_hash)(lustre_hash_t *lh, void *key, unsigned mask);
- void * (*lh_key)(struct hlist_node *hnode);
- int (*lh_compare)(void *key, struct hlist_node *hnode);
- void * (*lh_get)(struct hlist_node *hnode);
- void * (*lh_put)(struct hlist_node *hnode);
- void (*lh_exit)(struct hlist_node *hnode);
-} lustre_hash_ops_t;
-
-#define LH_DEBUG 0x0001 /* Enable expensive debug checks */
-#define LH_REHASH 0x0002 /* Enable dynamic hash resizing */
-
-#define LHO(lh) (lh)->lh_ops
-#define LHP(lh, op) (lh)->lh_ops->lh_ ## op
-
-static inline unsigned
-lh_hash(lustre_hash_t *lh, void *key, unsigned mask)
-{
- LASSERT(lh);
- LASSERT(LHO(lh));
- LASSERT(LHP(lh, hash));
-
- return LHP(lh, hash)(lh, key, mask);
-}
-
-static inline void *
-lh_key(lustre_hash_t *lh, struct hlist_node *hnode)
-{
- LASSERT(lh);
- LASSERT(hnode);
- LASSERT(LHO(lh));
-
- if (LHP(lh, key))
- return LHP(lh, key)(hnode);
-
- return NULL;
-}
-
-/* Returns 1 on a match,
- * XXX: This would be better if it returned, -1, 0, or 1 for
- * <, =, > respectivly. It could then be used to implement
- * a LH_SORT feature flags which could keep each lustre hash
- * bucket in order. This would increase insertion times
- * but could reduce lookup times for deep chains. Ideally,
- * the rehash should keep chain depth short but if that
- * ends up not being the case this would be a nice feature.
- */
-static inline int
-lh_compare(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
-{
- LASSERT(lh);
- LASSERT(hnode);
- LASSERT(LHO(lh));
-
- if (LHP(lh, compare))
- return LHP(lh, compare)(key, hnode);
-
- return -EOPNOTSUPP;
-}
-
-static inline void *
-lh_get(lustre_hash_t *lh, struct hlist_node *hnode)
-{
- LASSERT(lh);
- LASSERT(hnode);
- LASSERT(LHO(lh));
-
- if (LHP(lh, get))
- return LHP(lh, get)(hnode);
-
- return NULL;
-}
-
-static inline void *
-lh_put(lustre_hash_t *lh, struct hlist_node *hnode)
-{
- LASSERT(lh);
- LASSERT(hnode);
- LASSERT(LHO(lh));
-
- if (LHP(lh, put))
- return LHP(lh, put)(hnode);
-
- return NULL;
-}
-
-static inline void
-lh_exit(lustre_hash_t *lh, struct hlist_node *hnode)
-{
- LASSERT(lh);
- LASSERT(hnode);
- LASSERT(LHO(lh));
-
- if (LHP(lh, exit))
- return LHP(lh, exit)(hnode);
-}
-
-/* Validate hnode references the correct key */
-static inline void
-__lustre_hash_key_validate(lustre_hash_t *lh, void *key,
- struct hlist_node *hnode)
-{
- if (unlikely(lh->lh_flags & LH_DEBUG))
- LASSERT(lh_compare(lh, key, hnode) > 0);
-}
-
-/* Validate hnode is in the correct bucket */
-static inline void
-__lustre_hash_bucket_validate(lustre_hash_t *lh, lustre_hash_bucket_t *lhb,
- struct hlist_node *hnode)
-{
- unsigned i;
-
- if (unlikely(lh->lh_flags & LH_DEBUG)) {
- i = lh_hash(lh, lh_key(lh, hnode), lh->lh_cur_mask);
- LASSERT(lh->lh_buckets[i] == lhb);
- }
-}
-
-static inline struct hlist_node *
-__lustre_hash_bucket_lookup(lustre_hash_t *lh,
- lustre_hash_bucket_t *lhb, void *key)
-{
- struct hlist_node *hnode;
-
- hlist_for_each(hnode, &lhb->lhb_head)
- if (lh_compare(lh, key, hnode) > 0)
- return hnode;
-
- return NULL;
-}
-
-static inline void *
-__lustre_hash_bucket_add(lustre_hash_t *lh,
- lustre_hash_bucket_t *lhb,
- struct hlist_node *hnode)
-{
- hlist_add_head(hnode, &(lhb->lhb_head));
- atomic_inc(&lhb->lhb_count);
- atomic_inc(&lh->lh_count);
-
- return lh_get(lh, hnode);
-}
-
-static inline void *
-__lustre_hash_bucket_del(lustre_hash_t *lh,
- lustre_hash_bucket_t *lhb,
- struct hlist_node *hnode)
-{
- hlist_del_init(hnode);
- LASSERT(atomic_read(&lhb->lhb_count) > 0);
- atomic_dec(&lhb->lhb_count);
- LASSERT(atomic_read(&lh->lh_count) > 0);
- atomic_dec(&lh->lh_count);
-
- return lh_put(lh, hnode);
-}
-
-/* Some hash init argument constants */
-#define HASH_POOLS_CUR_BITS 3
-#define HASH_POOLS_MAX_BITS 7
-#define HASH_UUID_CUR_BITS 7
-#define HASH_UUID_MAX_BITS 12
-#define HASH_NID_CUR_BITS 7
-#define HASH_NID_MAX_BITS 12
-#define HASH_NID_STATS_CUR_BITS 7
-#define HASH_NID_STATS_MAX_BITS 12
-#define HASH_LQS_CUR_BITS 7
-#define HASH_LQS_MAX_BITS 12
-#define HASH_CONN_CUR_BITS 5
-#define HASH_CONN_MAX_BITS 15
-
-/* Hash init/cleanup functions */
-lustre_hash_t *lustre_hash_init(char *name, unsigned int cur_bits,
- unsigned int max_bits,
- lustre_hash_ops_t *ops, int flags);
-void lustre_hash_exit(lustre_hash_t *lh);
-
-/* Hash addition functions */
-void lustre_hash_add(lustre_hash_t *lh, void *key,
- struct hlist_node *hnode);
-int lustre_hash_add_unique(lustre_hash_t *lh, void *key,
- struct hlist_node *hnode);
-void *lustre_hash_findadd_unique(lustre_hash_t *lh, void *key,
- struct hlist_node *hnode);
-
-/* Hash deletion functions */
-void *lustre_hash_del(lustre_hash_t *lh, void *key, struct hlist_node *hnode);
-void *lustre_hash_del_key(lustre_hash_t *lh, void *key);
-
-/* Hash lookup/for_each functions */
-void *lustre_hash_lookup(lustre_hash_t *lh, void *key);
-typedef void (*lh_for_each_cb)(void *obj, void *data);
-void lustre_hash_for_each(lustre_hash_t *lh, lh_for_each_cb, void *data);
-void lustre_hash_for_each_safe(lustre_hash_t *lh, lh_for_each_cb, void *data);
-void lustre_hash_for_each_empty(lustre_hash_t *lh, lh_for_each_cb, void *data);
-void lustre_hash_for_each_key(lustre_hash_t *lh, void *key,
- lh_for_each_cb, void *data);
-
-/*
- * Rehash - Theta is calculated to be the average chained
- * hash depth assuming a perfectly uniform hash funcion.
- */
-int lustre_hash_rehash(lustre_hash_t *lh, int bits);
-void lustre_hash_rehash_key(lustre_hash_t *lh, void *old_key,
- void *new_key, struct hlist_node *hnode);
-
-
-#define LH_THETA_BITS 10
-
-/* Return integer component of theta */
-static inline int __lustre_hash_theta_int(int theta)
-{
- return (theta >> LH_THETA_BITS);
-}
-
-/* Return a fractional value between 0 and 999 */
-static inline int __lustre_hash_theta_frac(int theta)
-{
- return ((theta * 1000) >> LH_THETA_BITS) -
- (__lustre_hash_theta_int(theta) * 1000);
-}
-
-static inline int __lustre_hash_theta(lustre_hash_t *lh)
-{
- return (atomic_read(&lh->lh_count) << LH_THETA_BITS) >> lh->lh_cur_bits;
-}
-
-static inline void __lustre_hash_set_theta(lustre_hash_t *lh, int min, int max)
-{
- LASSERT(min < max);
- lh->lh_min_theta = min;
- lh->lh_max_theta = max;
-}
-
-/* Generic debug formatting routines mainly for proc handler */
-int lustre_hash_debug_header(char *str, int size);
-int lustre_hash_debug_str(lustre_hash_t *lh, char *str, int size);
-
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define CFS_GOLDEN_RATIO_PRIME_32 0x9e370001UL
-/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define CFS_GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
-
-/*
- * Generic djb2 hash algorithm for character arrays.
- */
-static inline unsigned
-lh_djb2_hash(void *key, size_t size, unsigned mask)
-{
- unsigned i, hash = 5381;
-
- LASSERT(key != NULL);
-
- for (i = 0; i < size; i++)
- hash = hash * 33 + ((char *)key)[i];
-
- return (hash & mask);
-}
-
-/*
- * Generic u32 hash algorithm.
- */
-static inline unsigned
-lh_u32_hash(__u32 key, unsigned mask)
-{
- return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
-}
-
-/*
- * Generic u64 hash algorithm.
- */
-static inline unsigned
-lh_u64_hash(__u64 key, unsigned mask)
-{
- return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
-}
-
-#define lh_for_each_bucket(lh, lhb, pos) \
- for (pos = 0; \
- pos <= lh->lh_cur_mask && \
- ({ lhb = lh->lh_buckets[i]; 1; }); \
- pos++)
-
-#endif /* __CLASS_HASH_H */
typedef __u64 kdev_t;
+#ifndef min
#define min(x,y) ((x)<(y) ? (x) : (y))
+#endif
+
+#ifndef max
#define max(x,y) ((x)>(y) ? (x) : (y))
+#endif
#ifndef min_t
#define min_t(type,x,y) \
#include <lprocfs_status.h>
#include <lustre/lustre_idl.h>
#include <lustre_dlm.h>
-#include <class_hash.h>
struct mds_client_data;
struct mdt_client_data;
struct lprocfs_stats *exp_md_stats;
struct ptlrpc_connection *exp_connection;
__u32 exp_conn_cnt;
- lustre_hash_t *exp_lock_hash; /* existing lock hash */
+ cfs_hash_t *exp_lock_hash; /* existing lock hash */
spinlock_t exp_lock_hash_lock;
struct list_head exp_outstanding_replies;
struct list_head exp_uncommitted_replies;
#include <lustre/lustre_idl.h>
#include <lvfs.h>
#include <obd_support.h>
-#include <class_hash.h>
struct obd_device;
struct client_obd;
/** See comment of lqc_itune_sz */
unsigned long lqc_btune_sz;
/** all lustre_qunit_size structures */
- struct lustre_hash *lqc_lqs_hash;
+ cfs_hash_t *lqc_lqs_hash;
/** @{ */
/**
#include <lustre_quota.h>
#include <lustre_fld.h>
#include <lustre_capa.h>
-#include <class_hash.h>
#include <libcfs/bitmap.h>
__u32 lov_tgt_size; /* size of tgts array */
int lov_connects;
int lov_pool_count;
- lustre_hash_t *lov_pools_hash_body; /* used for key access */
+ cfs_hash_t *lov_pools_hash_body; /* used for key access */
struct list_head lov_pool_list; /* used for sequential access */
cfs_proc_dir_entry_t *lov_pool_proc_entry;
enum lustre_sec_part lov_sp_me;
* (for /proc/status only!!) */
obd_process_conf:1; /* device is processing mgs config */
/* uuid-export hash body */
- struct lustre_hash *obd_uuid_hash;
+ cfs_hash_t *obd_uuid_hash;
/* nid-export hash body */
- struct lustre_hash *obd_nid_hash;
+ cfs_hash_t *obd_nid_hash;
/* nid stats body */
- struct lustre_hash *obd_nid_stats_hash;
+ cfs_hash_t *obd_nid_stats_hash;
struct list_head obd_nid_stats;
atomic_t obd_refcount;
cfs_waitq_t obd_refcount_waitq;
int obd_alloc_fail(const void *ptr, const char *name, const char *type,
size_t size, const char *file, int line);
+/* Some hash init argument constants */
+#define HASH_POOLS_CUR_BITS 3
+#define HASH_POOLS_MAX_BITS 7
+#define HASH_UUID_CUR_BITS 7
+#define HASH_UUID_MAX_BITS 12
+#define HASH_NID_CUR_BITS 7
+#define HASH_NID_MAX_BITS 12
+#define HASH_NID_STATS_CUR_BITS 7
+#define HASH_NID_STATS_MAX_BITS 12
+#define HASH_LQS_CUR_BITS 7
+#define HASH_LQS_MAX_BITS 12
+#define HASH_CONN_CUR_BITS 5
+#define HASH_CONN_MAX_BITS 15
+#define HASH_EXP_LOCK_CUR_BITS 7
+#define HASH_EXP_LOCK_MAX_BITS 16
+
/* Timeout definitions */
#define OBD_TIMEOUT_DEFAULT 100
#define LDLM_TIMEOUT_DEFAULT 20
new2->l_export = class_export_lock_get(lock->l_export, new2);
if (new2->l_export->exp_lock_hash &&
hlist_unhashed(&new2->l_exp_hash))
- lustre_hash_add(new2->l_export->exp_lock_hash,
- &new2->l_remote_handle,
- &new2->l_exp_hash);
+ cfs_hash_add(new2->l_export->exp_lock_hash,
+ &new2->l_remote_handle,
+ &new2->l_exp_hash);
}
if (*flags == LDLM_FL_WAIT_NOREPROC)
ldlm_lock_addref_internal_nolock(new2,
if (obd_uuid_equals(&cluuid, &target->obd_uuid))
goto dont_check_exports;
- export = lustre_hash_lookup(target->obd_uuid_hash, &cluuid);
+ export = cfs_hash_lookup(target->obd_uuid_hash, &cluuid);
if (!export)
goto no_export;
/* Check to see if connection came from another NID */
if ((export->exp_connection->c_peer.nid != req->rq_peer.nid) &&
!hlist_unhashed(&export->exp_nid_hash))
- lustre_hash_del(export->exp_obd->obd_nid_hash,
- &export->exp_connection->c_peer.nid,
- &export->exp_nid_hash);
+ cfs_hash_del(export->exp_obd->obd_nid_hash,
+ &export->exp_connection->c_peer.nid,
+ &export->exp_nid_hash);
ptlrpc_connection_put(export->exp_connection);
}
req->rq_self,
&remote_uuid);
if (hlist_unhashed(&export->exp_nid_hash)) {
- lustre_hash_add_unique(export->exp_obd->obd_nid_hash,
- &export->exp_connection->c_peer.nid,
- &export->exp_nid_hash);
+ cfs_hash_add_unique(export->exp_obd->obd_nid_hash,
+ &export->exp_connection->c_peer.nid,
+ &export->exp_nid_hash);
}
spin_lock_bh(&target->obd_processing_task_lock);
if (lock->l_export && lock->l_export->exp_lock_hash &&
!hlist_unhashed(&lock->l_exp_hash))
- lustre_hash_del(lock->l_export->exp_lock_hash,
- &lock->l_remote_handle, &lock->l_exp_hash);
+ cfs_hash_del(lock->l_export->exp_lock_hash,
+ &lock->l_remote_handle, &lock->l_exp_hash);
ldlm_lock_remove_from_lru(lock);
class_handle_unhash(&lock->l_handle);
void ldlm_cancel_locks_for_export(struct obd_export *exp)
{
- lustre_hash_for_each_empty(exp->exp_lock_hash,
- ldlm_cancel_locks_for_export_cb, exp);
+ cfs_hash_for_each_empty(exp->exp_lock_hash,
+ ldlm_cancel_locks_for_export_cb, exp);
}
/**
if (unlikely(flags & LDLM_FL_REPLAY)) {
/* Find an existing lock in the per-export lock hash */
- lock = lustre_hash_lookup(req->rq_export->exp_lock_hash,
- (void *)&dlm_req->lock_handle[0]);
+ lock = cfs_hash_lookup(req->rq_export->exp_lock_hash,
+ (void *)&dlm_req->lock_handle[0]);
if (lock != NULL) {
DEBUG_REQ(D_DLMTRACE, req, "found existing lock cookie "
LPX64, lock->l_handle.h_cookie);
lock->l_export = class_export_lock_get(req->rq_export, lock);
if (lock->l_export->exp_lock_hash)
- lustre_hash_add(lock->l_export->exp_lock_hash,
- &lock->l_remote_handle,
- &lock->l_exp_hash);
+ cfs_hash_add(lock->l_export->exp_lock_hash,
+ &lock->l_remote_handle,
+ &lock->l_exp_hash);
existing_lock:
lock->l_flags |= LDLM_FL_AST_SENT;
if (lock->l_export && lock->l_export->exp_lock_hash &&
!hlist_unhashed(&lock->l_exp_hash))
- lustre_hash_del(lock->l_export->exp_lock_hash,
- &lock->l_remote_handle, &lock->l_exp_hash);
+ cfs_hash_del(lock->l_export->exp_lock_hash,
+ &lock->l_remote_handle, &lock->l_exp_hash);
list_add_tail(&lock->l_rk_ast, rpc_list);
LDLM_LOCK_GET(lock);
ENTRY;
CFS_INIT_LIST_HEAD(&rpc_list);
- lustre_hash_for_each_empty(exp->exp_lock_hash,
- ldlm_revoke_lock_cb, &rpc_list);
+ cfs_hash_for_each_empty(exp->exp_lock_hash,
+ ldlm_revoke_lock_cb, &rpc_list);
ldlm_run_ast_work(&rpc_list, LDLM_WORK_REVOKE_AST);
EXIT;
* Export handle<->lock hash operations.
*/
static unsigned
-ldlm_export_lock_hash(lustre_hash_t *lh, void *key, unsigned mask)
+ldlm_export_lock_hash(cfs_hash_t *hs, void *key, unsigned mask)
{
- return lh_u64_hash(((struct lustre_handle *)key)->cookie, mask);
+ return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
}
static void *
RETURN(lock);
}
-static lustre_hash_ops_t ldlm_export_lock_ops = {
- .lh_hash = ldlm_export_lock_hash,
- .lh_key = ldlm_export_lock_key,
- .lh_compare = ldlm_export_lock_compare,
- .lh_get = ldlm_export_lock_get,
- .lh_put = ldlm_export_lock_put
+static cfs_hash_ops_t ldlm_export_lock_ops = {
+ .hs_hash = ldlm_export_lock_hash,
+ .hs_key = ldlm_export_lock_key,
+ .hs_compare = ldlm_export_lock_compare,
+ .hs_get = ldlm_export_lock_get,
+ .hs_put = ldlm_export_lock_put
};
int ldlm_init_export(struct obd_export *exp)
ENTRY;
exp->exp_lock_hash =
- lustre_hash_init(obd_uuid2str(&exp->exp_client_uuid),
- 7, 16, &ldlm_export_lock_ops, LH_REHASH);
+ cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
+ HASH_EXP_LOCK_CUR_BITS, HASH_EXP_LOCK_MAX_BITS,
+ &ldlm_export_lock_ops, CFS_HASH_REHASH);
if (!exp->exp_lock_hash)
RETURN(-ENOMEM);
void ldlm_destroy_export(struct obd_export *exp)
{
ENTRY;
- lustre_hash_exit(exp->exp_lock_hash);
+ cfs_hash_destroy(exp->exp_lock_hash);
exp->exp_lock_hash = NULL;
EXIT;
}
/* Key change rehash lock in per-export hash with new key */
if (exp->exp_lock_hash)
- lustre_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
- &lock->l_remote_handle,
- &lock->l_exp_hash);
+ cfs_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
+ &lock->l_remote_handle,
+ &lock->l_exp_hash);
*flags = reply->lock_flags;
lock->l_flags |= reply->lock_flags & LDLM_INHERIT_FLAGS;
/* Key change rehash lock in per-export hash with new key */
exp = req->rq_export;
if (exp && exp->exp_lock_hash)
- lustre_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
- &lock->l_remote_handle,
- &lock->l_exp_hash);
+ cfs_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
+ &lock->l_remote_handle,
+ &lock->l_exp_hash);
LDLM_DEBUG(lock, "replayed lock:");
ptlrpc_import_recovery_state_machine(req->rq_import);
extern struct lu_device_type lov_device_type;
/* pools */
-extern lustre_hash_ops_t pool_hash_operations;
+extern cfs_hash_ops_t pool_hash_operations;
/* ost_pool methods */
int lov_ost_pool_init(struct ost_pool *op, unsigned int count);
int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count);
RETURN(-ENOMEM);
cfs_waitq_init(&lov->lov_qos.lq_statfs_waitq);
- lov->lov_pools_hash_body = lustre_hash_init("POOLS",
- HASH_POOLS_CUR_BITS,
- HASH_POOLS_MAX_BITS,
- &pool_hash_operations, 0);
+        /* Fix vs. the converted line: the original lustre_hash_init() call
+         * passed HASH_POOLS_CUR_BITS as the initial size and
+         * HASH_POOLS_MAX_BITS as the growth limit; passing CUR_BITS twice
+         * would pin the table at its initial size and defeat rehashing. */
+        lov->lov_pools_hash_body = cfs_hash_create("POOLS", HASH_POOLS_CUR_BITS,
+                                                   HASH_POOLS_MAX_BITS,
+                                                   &pool_hash_operations, 0);
CFS_INIT_LIST_HEAD(&lov->lov_pool_list);
lov->lov_pool_count = 0;
rc = lov_ost_pool_init(&lov->lov_packed, 0);
CDEBUG(D_INFO, "delete pool %p\n", pool);
lov_pool_del(obd, pool->pool_name);
}
- lustre_hash_exit(lov->lov_pools_hash_body);
+ cfs_hash_destroy(lov->lov_pools_hash_body);
lov_ost_pool_free(&(lov->lov_qos.lq_rr.lqr_pool));
lov_ost_pool_free(&lov->lov_packed);
* Chapter 6.4.
* Addison Wesley, 1973
*/
-static __u32 pool_hashfn(lustre_hash_t *hash_body, void *key, unsigned mask)
+static __u32 pool_hashfn(cfs_hash_t *hash_body, void *key, unsigned mask)
{
int i;
__u32 result;
return (pool);
}
-lustre_hash_ops_t pool_hash_operations = {
- .lh_hash = pool_hashfn,
- .lh_key = pool_key,
- .lh_compare = pool_hashkey_compare,
- .lh_get = pool_hashrefcount_get,
- .lh_put = pool_hashrefcount_put,
+cfs_hash_ops_t pool_hash_operations = {
+ .hs_hash = pool_hashfn,
+ .hs_key = pool_key,
+ .hs_compare = pool_hashkey_compare,
+ .hs_get = pool_hashrefcount_get,
+ .hs_put = pool_hashrefcount_put,
};
#ifdef LPROCFS
spin_unlock(&obd->obd_dev_lock);
/* add to find only when it fully ready */
- rc = lustre_hash_add_unique(lov->lov_pools_hash_body, poolname,
- &new_pool->pool_hash);
+ rc = cfs_hash_add_unique(lov->lov_pools_hash_body, poolname,
+ &new_pool->pool_hash);
if (rc)
GOTO(out_err, rc = -EEXIST);
lov = &(obd->u.lov);
/* lookup and kill hash reference */
- pool = lustre_hash_del_key(lov->lov_pools_hash_body, poolname);
+ pool = cfs_hash_del_key(lov->lov_pools_hash_body, poolname);
if (pool == NULL)
RETURN(-ENOENT);
lov = &(obd->u.lov);
- pool = lustre_hash_lookup(lov->lov_pools_hash_body, poolname);
+ pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
if (pool == NULL)
RETURN(-ENOENT);
lov = &(obd->u.lov);
- pool = lustre_hash_lookup(lov->lov_pools_hash_body, poolname);
+ pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
if (pool == NULL)
RETURN(-ENOENT);
pool = NULL;
if (poolname[0] != '\0') {
- pool = lustre_hash_lookup(lov->lov_pools_hash_body, poolname);
+ pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
if (pool == NULL)
CWARN("Request for an unknown pool ("LOV_POOLNAMEF")\n",
poolname);
unlock_res_and_lock(new_lock);
- lustre_hash_add(new_lock->l_export->exp_lock_hash,
- &new_lock->l_remote_handle,
- &new_lock->l_exp_hash);
+ cfs_hash_add(new_lock->l_export->exp_lock_hash,
+ &new_lock->l_remote_handle,
+ &new_lock->l_exp_hash);
LDLM_LOCK_RELEASE(new_lock);
lh->mlh_reg_lh.cookie = 0;
dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
remote_hdl = dlmreq->lock_handle[0];
- lock = lustre_hash_lookup(exp->exp_lock_hash, &remote_hdl);
+ lock = cfs_hash_lookup(exp->exp_lock_hash, &remote_hdl);
if (lock) {
if (lock != new_lock) {
lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
lh->mlh_reg_lh.cookie);
if (old_lock)
*old_lock = LDLM_LOCK_GET(lock);
- lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
+ cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
return;
}
- lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
+ cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
}
/*
sscanf(buffer, "%40s", tmpbuf);
obd_str2uuid(&uuid, tmpbuf);
- exp = lustre_hash_lookup(obd->obd_uuid_hash, &uuid);
+ exp = cfs_hash_lookup(obd->obd_uuid_hash, &uuid);
if (exp == NULL) {
CERROR("%s: no export %s found\n",
obd->obd_name, obd_uuid2str(&uuid));
sources:
obdclass-all-objs := llog.o llog_cat.o llog_lvfs.o llog_obd.o llog_swab.o
-obdclass-all-objs += class_obd.o class_hash.o
-obdclass-all-objs += debug.o genops.o uuid.o llog_ioctl.o
+obdclass-all-objs += class_obd.o debug.o genops.o uuid.o llog_ioctl.o
obdclass-all-objs += lprocfs_status.o lustre_handles.o lustre_peer.o
obdclass-all-objs += statfs_pack.o obdo.o obd_config.o obd_mount.o mea.o
obdclass-all-objs += lu_object.o dt_object.o hash.o capa.o lu_time.o
noinst_LIBRARIES = liblustreclass.a
liblustreclass_a_SOURCES = class_obd.c debug.c genops.c statfs_pack.c mea.c uuid.c
-liblustreclass_a_SOURCES += lustre_handles.c lustre_peer.c lprocfs_status.c class_hash.c
+liblustreclass_a_SOURCES += lustre_handles.c lustre_peer.c lprocfs_status.c
liblustreclass_a_SOURCES += obdo.c obd_config.c llog.c llog_obd.c llog_cat.c
liblustreclass_a_SOURCES += llog_lvfs.c llog_swab.c capa.c
liblustreclass_a_SOURCES += lu_object.c cl_object.c lu_time.c lu_ref.c
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/list.h>
-#include <class_hash.h> /* for lustre_hash stuff */
+#include <libcfs/libcfs_hash.h> /* for cfs_hash stuff */
/* lu_time_global_{init,fini}() */
#include <lu_time.h>
*/
struct hlist_node ce_node;
/**
- * Owner for the current cl_env, the key for lustre_hash.
+ * Owner for the current cl_env, the key for cfs_hash.
* Now current thread pointer is stored.
*/
void *ce_owner;
} while (0)
/*****************************************************************************
- * Routins to use lustre_hash functionality to bind the current thread
+ * Routins to use cfs_hash functionality to bind the current thread
* to cl_env
*/
/** lustre hash to manage the cl_env for current thread */
-static lustre_hash_t *cl_env_hash;
+static cfs_hash_t *cl_env_hash;
static void cl_env_init0(struct cl_env *cle, void *debug);
-static unsigned cl_env_hops_hash(lustre_hash_t *lh, void *key, unsigned mask)
+static unsigned cl_env_hops_hash(cfs_hash_t *lh, void *key, unsigned mask)
{
#if BITS_PER_LONG == 64
- return lh_u64_hash((__u64)key, mask);
+ return cfs_hash_u64_hash((__u64)key, mask);
#else
- return lh_u32_hash((__u32)key, mask);
+ return cfs_hash_u32_hash((__u32)key, mask);
#endif
}
return (key == cle->ce_owner);
}
-static lustre_hash_ops_t cl_env_hops = {
- .lh_hash = cl_env_hops_hash,
- .lh_compare = cl_env_hops_compare,
- .lh_key = cl_env_hops_obj,
- .lh_get = cl_env_hops_obj,
- .lh_put = cl_env_hops_obj,
+static cfs_hash_ops_t cl_env_hops = {
+ .hs_hash = cl_env_hops_hash,
+ .hs_compare = cl_env_hops_compare,
+ .hs_key = cl_env_hops_obj,
+ .hs_get = cl_env_hops_obj,
+ .hs_put = cl_env_hops_obj,
};
static inline struct cl_env *cl_env_fetch(void)
{
struct cl_env *cle;
- cle = lustre_hash_lookup(cl_env_hash, cfs_current());
+ cle = cfs_hash_lookup(cl_env_hash, cfs_current());
LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
return cle;
}
int rc;
LASSERT(cle->ce_owner == NULL);
cle->ce_owner = cfs_current();
- rc = lustre_hash_add_unique(cl_env_hash, cle->ce_owner,
+ rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
&cle->ce_node);
LASSERT(rc == 0);
}
if (cle && cle->ce_owner) {
void *cookie;
LASSERT(cle->ce_owner == cfs_current());
- cookie = lustre_hash_del(cl_env_hash, cle->ce_owner,
+ cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
&cle->ce_node);
cle->ce_owner = NULL;
LASSERT(cookie == cle);
{
int result;
- cl_env_hash = lustre_hash_init("cl_env", 8, 10, &cl_env_hops, 0);
+ cl_env_hash = cfs_hash_create("cl_env", 8, 10, &cl_env_hops,
+ CFS_HASH_REHASH);
if (cl_env_hash == NULL)
return -ENOMEM;
}
}
if (result)
- lustre_hash_exit(cl_env_hash);
+ cfs_hash_destroy(cl_env_hash);
return result;
}
cl_page_fini();
lu_context_key_degister(&cl_key);
lu_kmem_fini(cl_object_caches);
- lustre_hash_exit(cl_env_hash);
+ cfs_hash_destroy(cl_env_hash);
}
+++ /dev/null
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/class_hash.c
- *
- * Implement a hash class for hash process in lustre system.
- *
- * Author: YuZhangyong <yzy@clusterfs.com>
- *
- * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
- * - Simplified API and improved documentation
- * - Added per-hash feature flags:
- * * LH_DEBUG additional validation
- * * LH_REHASH dynamic rehashing
- * - Added per-hash statistics
- * - General performance enhancements
- */
-
-#ifndef __KERNEL__
-#include <liblustre.h>
-#include <obd.h>
-#endif
-
-#include <class_hash.h>
-
-static void
-lh_read_lock(lustre_hash_t *lh)
-{
- if ((lh->lh_flags & LH_REHASH) != 0)
- read_lock(&lh->lh_rwlock);
-}
-
-static void
-lh_read_unlock(lustre_hash_t *lh)
-{
- if ((lh->lh_flags & LH_REHASH) != 0)
- read_unlock(&lh->lh_rwlock);
-}
-
-static void
-lh_write_lock(lustre_hash_t *lh)
-{
- if ((lh->lh_flags & LH_REHASH) != 0)
- write_lock(&lh->lh_rwlock);
-}
-
-static void
-lh_write_unlock(lustre_hash_t *lh)
-{
- if ((lh->lh_flags & LH_REHASH) != 0)
- write_unlock(&lh->lh_rwlock);
-}
-
-/**
- * Initialize new lustre hash, where:
- * \param name Descriptive hash name
- * \param cur_bits Initial hash table size, in bits
- * \param max_bits Maximum allowed hash table resize, in bits
- * \param ops Registered hash table operations
- * \param flags LH_REHASH enables dynamic hash resizing
- * LH_SORT enables chained hash sort
- */
-lustre_hash_t *
-lustre_hash_init(char *name, unsigned int cur_bits, unsigned int max_bits,
- lustre_hash_ops_t *ops, int flags)
-{
- lustre_hash_t *lh;
- int i;
- ENTRY;
-
- LASSERT(name != NULL);
- LASSERT(ops != NULL);
-
- LASSERT(cur_bits > 0);
- LASSERT(max_bits >= cur_bits);
- LASSERT(max_bits < 31);
-
- LIBCFS_ALLOC(lh, sizeof(*lh));
- if (!lh)
- RETURN(NULL);
-
- strncpy(lh->lh_name, name, sizeof(lh->lh_name));
- lh->lh_name[sizeof(lh->lh_name) - 1] = '\0';
- atomic_set(&lh->lh_rehash_count, 0);
- atomic_set(&lh->lh_count, 0);
- rwlock_init(&lh->lh_rwlock);
- lh->lh_cur_bits = cur_bits;
- lh->lh_cur_mask = (1 << cur_bits) - 1;
- lh->lh_min_bits = cur_bits;
- lh->lh_max_bits = max_bits;
- /* XXX: need to fixup lustre_hash_rehash_bits() before this can be
- * anything other than 0.5 and 2.0 */
- lh->lh_min_theta = 1 << (LH_THETA_BITS - 1);
- lh->lh_max_theta = 1 << (LH_THETA_BITS + 1);
- lh->lh_ops = ops;
- lh->lh_flags = flags;
- if (cur_bits != max_bits && (lh->lh_flags & LH_REHASH) == 0)
- CWARN("Rehash is disabled, ignore max_bits %d\n", max_bits);
-
- /* theta * 1000 */
- __lustre_hash_set_theta(lh, 500, 2000);
-
- LIBCFS_ALLOC(lh->lh_buckets, sizeof(*lh->lh_buckets) << lh->lh_cur_bits);
- if (lh->lh_buckets == NULL) {
- LIBCFS_FREE(lh, sizeof(*lh));
- RETURN(NULL);
- }
-
- for (i = 0; i <= lh->lh_cur_mask; i++) {
- LIBCFS_ALLOC(lh->lh_buckets[i], sizeof(lustre_hash_bucket_t));
- if (lh->lh_buckets[i] == NULL) {
- lustre_hash_exit(lh);
- return NULL;
- }
-
- INIT_HLIST_HEAD(&lh->lh_buckets[i]->lhb_head);
- rwlock_init(&lh->lh_buckets[i]->lhb_rwlock);
- atomic_set(&lh->lh_buckets[i]->lhb_count, 0);
- }
-
- return lh;
-}
-EXPORT_SYMBOL(lustre_hash_init);
-
-/**
- * Cleanup lustre hash \a lh.
- */
-void
-lustre_hash_exit(lustre_hash_t *lh)
-{
- lustre_hash_bucket_t *lhb;
- struct hlist_node *hnode;
- struct hlist_node *pos;
- int i;
- ENTRY;
-
- LASSERT(lh != NULL);
-
- lh_write_lock(lh);
-
- lh_for_each_bucket(lh, lhb, i) {
- if (lhb == NULL)
- continue;
-
- write_lock(&lhb->lhb_rwlock);
- hlist_for_each_safe(hnode, pos, &(lhb->lhb_head)) {
- __lustre_hash_bucket_validate(lh, lhb, hnode);
- __lustre_hash_bucket_del(lh, lhb, hnode);
- lh_exit(lh, hnode);
- }
-
- LASSERT(hlist_empty(&(lhb->lhb_head)));
- LASSERT(atomic_read(&lhb->lhb_count) == 0);
- write_unlock(&lhb->lhb_rwlock);
- LIBCFS_FREE(lhb, sizeof(*lhb));
- }
-
- LASSERT(atomic_read(&lh->lh_count) == 0);
- lh_write_unlock(lh);
-
- LIBCFS_FREE(lh->lh_buckets, sizeof(*lh->lh_buckets) << lh->lh_cur_bits);
- LIBCFS_FREE(lh, sizeof(*lh));
- EXIT;
-}
-EXPORT_SYMBOL(lustre_hash_exit);
-
-static inline unsigned int lustre_hash_rehash_bits(lustre_hash_t *lh)
-{
- if (!(lh->lh_flags & LH_REHASH))
- return 0;
-
- /* XXX: need to handle case with max_theta != 2.0
- * and the case with min_theta != 0.5 */
- if ((lh->lh_cur_bits < lh->lh_max_bits) &&
- (__lustre_hash_theta(lh) > lh->lh_max_theta))
- return lh->lh_cur_bits + 1;
-
- if ((lh->lh_cur_bits > lh->lh_min_bits) &&
- (__lustre_hash_theta(lh) < lh->lh_min_theta))
- return lh->lh_cur_bits - 1;
-
- return 0;
-}
-
-/**
- * Add item \a hnode to lustre hash \a lh using \a key. The registered
- * ops->lh_get function will be called when the item is added.
- */
-void
-lustre_hash_add(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
-{
- lustre_hash_bucket_t *lhb;
- int bits;
- unsigned i;
- ENTRY;
-
- __lustre_hash_key_validate(lh, key, hnode);
-
- lh_read_lock(lh);
- i = lh_hash(lh, key, lh->lh_cur_mask);
- lhb = lh->lh_buckets[i];
- LASSERT(i <= lh->lh_cur_mask);
- LASSERT(hlist_unhashed(hnode));
-
- write_lock(&lhb->lhb_rwlock);
- __lustre_hash_bucket_add(lh, lhb, hnode);
- write_unlock(&lhb->lhb_rwlock);
-
- bits = lustre_hash_rehash_bits(lh);
- lh_read_unlock(lh);
- if (bits)
- lustre_hash_rehash(lh, bits);
-
- EXIT;
-}
-EXPORT_SYMBOL(lustre_hash_add);
-
-static struct hlist_node *
-lustre_hash_findadd_unique_hnode(lustre_hash_t *lh, void *key,
- struct hlist_node *hnode)
-{
- int bits = 0;
- struct hlist_node *ehnode;
- lustre_hash_bucket_t *lhb;
- unsigned i;
- ENTRY;
-
- __lustre_hash_key_validate(lh, key, hnode);
-
- lh_read_lock(lh);
- i = lh_hash(lh, key, lh->lh_cur_mask);
- lhb = lh->lh_buckets[i];
- LASSERT(i <= lh->lh_cur_mask);
- LASSERT(hlist_unhashed(hnode));
-
- write_lock(&lhb->lhb_rwlock);
- ehnode = __lustre_hash_bucket_lookup(lh, lhb, key);
- if (ehnode) {
- lh_get(lh, ehnode);
- } else {
- __lustre_hash_bucket_add(lh, lhb, hnode);
- ehnode = hnode;
- bits = lustre_hash_rehash_bits(lh);
- }
- write_unlock(&lhb->lhb_rwlock);
- lh_read_unlock(lh);
- if (bits)
- lustre_hash_rehash(lh, bits);
-
- RETURN(ehnode);
-}
-
-/**
- * Add item \a hnode to lustre hash \a lh using \a key. The registered
- * ops->lh_get function will be called if the item was added.
- * Returns 0 on success or -EALREADY on key collisions.
- */
-int
-lustre_hash_add_unique(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
-{
- struct hlist_node *ehnode;
- ENTRY;
-
- ehnode = lustre_hash_findadd_unique_hnode(lh, key, hnode);
- if (ehnode != hnode) {
- lh_put(lh, ehnode);
- RETURN(-EALREADY);
- }
- RETURN(0);
-}
-EXPORT_SYMBOL(lustre_hash_add_unique);
-
-/**
- * Add item \a hnode to lustre hash \a lh using \a key. If this \a key
- * already exists in the hash then ops->lh_get will be called on the
- * conflicting entry and that entry will be returned to the caller.
- * Otherwise ops->lh_get is called on the item which was added.
- */
-void *
-lustre_hash_findadd_unique(lustre_hash_t *lh, void *key,
- struct hlist_node *hnode)
-{
- struct hlist_node *ehnode;
- void *obj;
- ENTRY;
-
- ehnode = lustre_hash_findadd_unique_hnode(lh, key, hnode);
- obj = lh_get(lh, ehnode);
- lh_put(lh, ehnode);
- RETURN(obj);
-}
-EXPORT_SYMBOL(lustre_hash_findadd_unique);
-
-/**
- * Delete item \a hnode from the lustre hash \a lh using \a key. The \a key
- * is required to ensure the correct hash bucket is locked since there
- * is no direct linkage from the item to the bucket. The object
- * removed from the hash will be returned and obs->lh_put is called
- * on the removed object.
- */
-void *
-lustre_hash_del(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
-{
- lustre_hash_bucket_t *lhb;
- unsigned i;
- void *obj;
- ENTRY;
-
- __lustre_hash_key_validate(lh, key, hnode);
-
- lh_read_lock(lh);
- i = lh_hash(lh, key, lh->lh_cur_mask);
- lhb = lh->lh_buckets[i];
- LASSERT(i <= lh->lh_cur_mask);
- LASSERT(!hlist_unhashed(hnode));
-
- write_lock(&lhb->lhb_rwlock);
- obj = __lustre_hash_bucket_del(lh, lhb, hnode);
- write_unlock(&lhb->lhb_rwlock);
- lh_read_unlock(lh);
-
- RETURN(obj);
-}
-EXPORT_SYMBOL(lustre_hash_del);
-
-/**
- * Delete item given \a key in lustre hash \a lh. The first \a key found in
- * the hash will be removed, if the key exists multiple times in the hash
- * \a lh this function must be called once per key. The removed object
- * will be returned and ops->lh_put is called on the removed object.
- */
-void *
-lustre_hash_del_key(lustre_hash_t *lh, void *key)
-{
- struct hlist_node *hnode;
- lustre_hash_bucket_t *lhb;
- unsigned i;
- void *obj = NULL;
- ENTRY;
-
- lh_read_lock(lh);
- i = lh_hash(lh, key, lh->lh_cur_mask);
- lhb = lh->lh_buckets[i];
- LASSERT(i <= lh->lh_cur_mask);
-
- write_lock(&lhb->lhb_rwlock);
- hnode = __lustre_hash_bucket_lookup(lh, lhb, key);
- if (hnode)
- obj = __lustre_hash_bucket_del(lh, lhb, hnode);
-
- write_unlock(&lhb->lhb_rwlock);
- lh_read_unlock(lh);
-
- RETURN(obj);
-}
-EXPORT_SYMBOL(lustre_hash_del_key);
-
-/**
- * Lookup an item using \a key in the lustre hash \a lh and return it.
- * If the \a key is found in the hash lh->lh_get() is called and the
- * matching objects is returned. It is the callers responsibility
- * to call the counterpart ops->lh_put using the lh_put() macro
- * when when finished with the object. If the \a key was not found
- * in the hash \a lh NULL is returned.
- */
-void *
-lustre_hash_lookup(lustre_hash_t *lh, void *key)
-{
- struct hlist_node *hnode;
- lustre_hash_bucket_t *lhb;
- unsigned i;
- void *obj = NULL;
- ENTRY;
-
- lh_read_lock(lh);
- i = lh_hash(lh, key, lh->lh_cur_mask);
- lhb = lh->lh_buckets[i];
- LASSERT(i <= lh->lh_cur_mask);
-
- read_lock(&lhb->lhb_rwlock);
- hnode = __lustre_hash_bucket_lookup(lh, lhb, key);
- if (hnode)
- obj = lh_get(lh, hnode);
-
- read_unlock(&lhb->lhb_rwlock);
- lh_read_unlock(lh);
-
- RETURN(obj);
-}
-EXPORT_SYMBOL(lustre_hash_lookup);
-
-/**
- * For each item in the lustre hash \a lh call the passed callback \a func
- * and pass to it as an argument each hash item and the private \a data.
- * Before each callback ops->lh_get will be called, and after each
- * callback ops->lh_put will be called. Finally, during the callback
- * the bucket lock is held so the callback must never sleep.
- */
-void
-lustre_hash_for_each(lustre_hash_t *lh, lh_for_each_cb func, void *data)
-{
- struct hlist_node *hnode;
- lustre_hash_bucket_t *lhb;
- void *obj;
- int i;
- ENTRY;
-
- lh_read_lock(lh);
- lh_for_each_bucket(lh, lhb, i) {
- read_lock(&lhb->lhb_rwlock);
- hlist_for_each(hnode, &(lhb->lhb_head)) {
- __lustre_hash_bucket_validate(lh, lhb, hnode);
- obj = lh_get(lh, hnode);
- func(obj, data);
- (void)lh_put(lh, hnode);
- }
- read_unlock(&lhb->lhb_rwlock);
- }
- lh_read_unlock(lh);
-
- EXIT;
-}
-EXPORT_SYMBOL(lustre_hash_for_each);
-
-/**
- * For each item in the lustre hash \a lh call the passed callback \a func
- * and pass to it as an argument each hash item and the private \a data.
- * Before each callback ops->lh_get will be called, and after each
- * callback ops->lh_put will be called. During the callback the
- * bucket lock will not be held will allows for the current item
- * to be removed from the hash during the callback. However, care
- * should be taken to prevent other callers from operating on the
- * hash concurrently or list corruption may occur.
- */
-void
-lustre_hash_for_each_safe(lustre_hash_t *lh, lh_for_each_cb func, void *data)
-{
- struct hlist_node *hnode;
- struct hlist_node *pos;
- lustre_hash_bucket_t *lhb;
- void *obj;
- int i;
- ENTRY;
-
- lh_read_lock(lh);
- lh_for_each_bucket(lh, lhb, i) {
- read_lock(&lhb->lhb_rwlock);
- hlist_for_each_safe(hnode, pos, &(lhb->lhb_head)) {
- __lustre_hash_bucket_validate(lh, lhb, hnode);
- obj = lh_get(lh, hnode);
- read_unlock(&lhb->lhb_rwlock);
- func(obj, data);
- read_lock(&lhb->lhb_rwlock);
- (void)lh_put(lh, hnode);
- }
- read_unlock(&lhb->lhb_rwlock);
- }
- lh_read_unlock(lh);
- EXIT;
-}
-EXPORT_SYMBOL(lustre_hash_for_each_safe);
-
-/**
- * For each hash bucket in the lustre hash \a lh call the passed callback
- * \a func until all the hash buckets are empty. The passed callback \a func
- * or the previously registered callback lh->lh_put must remove the item
- * from the hash. You may either use the lustre_hash_del() or hlist_del()
- * functions. No rwlocks will be held during the callback \a func it is
- * safe to sleep if needed. This function will not terminate until the
- * hash is empty. Note it is still possible to concurrently add new
- * items in to the hash. It is the callers responsibility to ensure
- * the required locking is in place to prevent concurrent insertions.
- */
-void
-lustre_hash_for_each_empty(lustre_hash_t *lh, lh_for_each_cb func, void *data)
-{
- struct hlist_node *hnode;
- lustre_hash_bucket_t *lhb;
- void *obj;
- int i;
- ENTRY;
-
-restart:
- lh_read_lock(lh);
- lh_for_each_bucket(lh, lhb, i) {
- write_lock(&lhb->lhb_rwlock);
- while (!hlist_empty(&lhb->lhb_head)) {
- hnode = lhb->lhb_head.first;
- __lustre_hash_bucket_validate(lh, lhb, hnode);
- obj = lh_get(lh, hnode);
- write_unlock(&lhb->lhb_rwlock);
- lh_read_unlock(lh);
- func(obj, data);
- (void)lh_put(lh, hnode);
- goto restart;
- }
- write_unlock(&lhb->lhb_rwlock);
- }
- lh_read_unlock(lh);
- EXIT;
-}
-EXPORT_SYMBOL(lustre_hash_for_each_empty);
-
-/*
- * For each item in the lustre hash \a lh which matches the \a key call
- * the passed callback \a func and pass to it as an argument each hash
- * item and the private \a data. Before each callback ops->lh_get will
- * be called, and after each callback ops->lh_put will be called.
- * Finally, during the callback the bucket lock is held so the
- * callback must never sleep.
- */
-void
-lustre_hash_for_each_key(lustre_hash_t *lh, void *key,
- lh_for_each_cb func, void *data)
-{
- struct hlist_node *hnode;
- lustre_hash_bucket_t *lhb;
- unsigned i;
- ENTRY;
-
- lh_read_lock(lh);
- i = lh_hash(lh, key, lh->lh_cur_mask);
- lhb = lh->lh_buckets[i];
- LASSERT(i <= lh->lh_cur_mask);
-
- read_lock(&lhb->lhb_rwlock);
- hlist_for_each(hnode, &(lhb->lhb_head)) {
- __lustre_hash_bucket_validate(lh, lhb, hnode);
-
- if (!lh_compare(lh, key, hnode))
- continue;
-
- func(lh_get(lh, hnode), data);
- (void)lh_put(lh, hnode);
- }
-
- read_unlock(&lhb->lhb_rwlock);
- lh_read_unlock(lh);
-
- EXIT;
-}
-EXPORT_SYMBOL(lustre_hash_for_each_key);
-
-/**
- * Rehash the lustre hash \a lh to the given \a bits. This can be used
- * to grow the hash size when excessive chaining is detected, or to
- * shrink the hash when it is larger than needed. When the LH_REHASH
- * flag is set in \a lh the lustre hash may be dynamically rehashed
- * during addition or removal if the hash's theta value exceeds
- * either the lh->lh_min_theta or lh->max_theta values. By default
- * these values are tuned to keep the chained hash depth small, and
- * this approach assumes a reasonably uniform hashing function. The
- * theta thresholds for \a lh are tunable via lustre_hash_set_theta().
- */
-int
-lustre_hash_rehash(lustre_hash_t *lh, int bits)
-{
- struct hlist_node *hnode;
- struct hlist_node *pos;
- lustre_hash_bucket_t **lh_buckets;
- lustre_hash_bucket_t **rehash_buckets;
- lustre_hash_bucket_t *lh_lhb;
- lustre_hash_bucket_t *rehash_lhb;
- int i;
- int theta;
- int lh_mask;
- int lh_bits;
- int mask = (1 << bits) - 1;
- int rc = 0;
- void *key;
- ENTRY;
-
- LASSERT(!in_interrupt());
- LASSERT(mask > 0);
- LASSERT((lh->lh_flags & LH_REHASH) != 0);
-
- LIBCFS_ALLOC(rehash_buckets, sizeof(*rehash_buckets) << bits);
- if (!rehash_buckets)
- RETURN(-ENOMEM);
-
- for (i = 0; i <= mask; i++) {
- LIBCFS_ALLOC(rehash_buckets[i], sizeof(*rehash_buckets[i]));
- if (rehash_buckets[i] == NULL)
- GOTO(free, rc = -ENOMEM);
-
- INIT_HLIST_HEAD(&rehash_buckets[i]->lhb_head);
- rwlock_init(&rehash_buckets[i]->lhb_rwlock);
- atomic_set(&rehash_buckets[i]->lhb_count, 0);
- }
-
- lh_write_lock(lh);
-
- /*
- * Early return for multiple concurrent racing callers,
- * ensure we only trigger the rehash if it is still needed.
- */
- theta = __lustre_hash_theta(lh);
- if ((theta >= lh->lh_min_theta) && (theta <= lh->lh_max_theta)) {
- lh_write_unlock(lh);
- GOTO(free, rc = -EALREADY);
- }
-
- lh_bits = lh->lh_cur_bits;
- lh_buckets = lh->lh_buckets;
- lh_mask = (1 << lh_bits) - 1;
-
- lh->lh_cur_bits = bits;
- lh->lh_cur_mask = (1 << bits) - 1;
- lh->lh_buckets = rehash_buckets;
- atomic_inc(&lh->lh_rehash_count);
-
- for (i = 0; i <= lh_mask; i++) {
- lh_lhb = lh_buckets[i];
-
- write_lock(&lh_lhb->lhb_rwlock);
- hlist_for_each_safe(hnode, pos, &(lh_lhb->lhb_head)) {
- key = lh_key(lh, hnode);
- LASSERT(key);
-
- /*
- * Validate hnode is in the correct bucket.
- */
- if (unlikely(lh->lh_flags & LH_DEBUG))
- LASSERT(lh_hash(lh, key, lh_mask) == i);
-
- /*
- * Delete from old hash bucket.
- */
- hlist_del(hnode);
- LASSERT(atomic_read(&lh_lhb->lhb_count) > 0);
- atomic_dec(&lh_lhb->lhb_count);
-
- /*
- * Add to rehash bucket, ops->lh_key must be defined.
- */
- rehash_lhb = rehash_buckets[lh_hash(lh, key, mask)];
- hlist_add_head(hnode, &(rehash_lhb->lhb_head));
- atomic_inc(&rehash_lhb->lhb_count);
- }
-
- LASSERT(hlist_empty(&(lh_lhb->lhb_head)));
- LASSERT(atomic_read(&lh_lhb->lhb_count) == 0);
- write_unlock(&lh_lhb->lhb_rwlock);
- }
-
- lh_write_unlock(lh);
- rehash_buckets = lh_buckets;
- i = (1 << lh_bits) - 1;
- bits = lh_bits;
- free:
- while (i-- > 0)
- LIBCFS_FREE(rehash_buckets[i], sizeof(*rehash_buckets[i]));
- LIBCFS_FREE(rehash_buckets, sizeof(*rehash_buckets) << bits);
- RETURN(rc);
-}
-EXPORT_SYMBOL(lustre_hash_rehash);
-
-/**
- * Rehash the object referenced by \a hnode in the lustre hash \a lh. The
- * \a old_key must be provided to locate the objects previous location
- * in the hash, and the \a new_key will be used to reinsert the object.
- * Use this function instead of a lustre_hash_add() + lustre_hash_del()
- * combo when it is critical that there is no window in time where the
- * object is missing from the hash. When an object is being rehashed
- * the registered lh_get() and lh_put() functions will not be called.
- */
-void lustre_hash_rehash_key(lustre_hash_t *lh, void *old_key, void *new_key,
- struct hlist_node *hnode)
-{
- lustre_hash_bucket_t *old_lhb;
- lustre_hash_bucket_t *new_lhb;
- unsigned i;
- unsigned j;
- ENTRY;
-
- __lustre_hash_key_validate(lh, new_key, hnode);
- LASSERT(!hlist_unhashed(hnode));
-
- lh_read_lock(lh);
-
- i = lh_hash(lh, old_key, lh->lh_cur_mask);
- old_lhb = lh->lh_buckets[i];
- LASSERT(i <= lh->lh_cur_mask);
-
- j = lh_hash(lh, new_key, lh->lh_cur_mask);
- new_lhb = lh->lh_buckets[j];
- LASSERT(j <= lh->lh_cur_mask);
-
- if (i < j) { /* write_lock ordering */
- write_lock(&old_lhb->lhb_rwlock);
- write_lock(&new_lhb->lhb_rwlock);
- } else if (i > j) {
- write_lock(&new_lhb->lhb_rwlock);
- write_lock(&old_lhb->lhb_rwlock);
- } else { /* do nothing */
- read_unlock(&lh->lh_rwlock);
- EXIT;
- return;
- }
-
- /*
- * Migrate item between hash buckets without calling
- * the lh_get() and lh_put() callback functions.
- */
- hlist_del(hnode);
- LASSERT(atomic_read(&old_lhb->lhb_count) > 0);
- atomic_dec(&old_lhb->lhb_count);
- hlist_add_head(hnode, &(new_lhb->lhb_head));
- atomic_inc(&new_lhb->lhb_count);
-
- write_unlock(&new_lhb->lhb_rwlock);
- write_unlock(&old_lhb->lhb_rwlock);
- lh_read_unlock(lh);
-
- EXIT;
-}
-EXPORT_SYMBOL(lustre_hash_rehash_key);
-
-int lustre_hash_debug_header(char *str, int size)
-{
- return snprintf(str, size,
- "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%6s%s\n", LUSTRE_MAX_HASH_NAME,
- "name", "cur", "min", "max", "theta", "t-min", "t-max",
- "flags", "rehash", "count", " distribution");
-}
-EXPORT_SYMBOL(lustre_hash_debug_header);
-
-int lustre_hash_debug_str(lustre_hash_t *lh, char *str, int size)
-{
- lustre_hash_bucket_t *lhb;
- int theta;
- int i;
- int c = 0;
- int dist[8] = { 0, };
-
- if (str == NULL || size == 0)
- return 0;
-
- lh_read_lock(lh);
- theta = __lustre_hash_theta(lh);
-
- c += snprintf(str + c, size - c, "%-*s ",
- LUSTRE_MAX_HASH_NAME, lh->lh_name);
- c += snprintf(str + c, size - c, "%5d ", 1 << lh->lh_cur_bits);
- c += snprintf(str + c, size - c, "%5d ", 1 << lh->lh_min_bits);
- c += snprintf(str + c, size - c, "%5d ", 1 << lh->lh_max_bits);
- c += snprintf(str + c, size - c, "%d.%03d ",
- __lustre_hash_theta_int(theta),
- __lustre_hash_theta_frac(theta));
- c += snprintf(str + c, size - c, "%d.%03d ",
- __lustre_hash_theta_int(lh->lh_min_theta),
- __lustre_hash_theta_frac(lh->lh_min_theta));
- c += snprintf(str + c, size - c, "%d.%03d ",
- __lustre_hash_theta_int(lh->lh_max_theta),
- __lustre_hash_theta_frac(lh->lh_max_theta));
- c += snprintf(str + c, size - c, " 0x%02x ", lh->lh_flags);
- c += snprintf(str + c, size - c, "%6d ",
- atomic_read(&lh->lh_rehash_count));
- c += snprintf(str + c, size - c, "%5d ",
- atomic_read(&lh->lh_count));
-
- /*
- * The distribution is a summary of the chained hash depth in
- * each of the lustre hash buckets. Each buckets lhb_count is
- * divided by the hash theta value and used to generate a
- * histogram of the hash distribution. A uniform hash will
- * result in all hash buckets being close to the average thus
- * only the first few entries in the histogram will be non-zero.
- * If you hash function results in a non-uniform hash the will
- * be observable by outlier bucks in the distribution histogram.
- *
- * Uniform hash distribution: 128/128/0/0/0/0/0/0
- * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1
- */
- lh_for_each_bucket(lh, lhb, i)
- dist[min(__fls(atomic_read(&lhb->lhb_count)/max(theta,1)),7)]++;
-
- for (i = 0; i < 8; i++)
- c += snprintf(str + c, size - c, "%d%c", dist[i],
- (i == 7) ? '\n' : '/');
-
- lh_read_unlock(lh);
-
- return c;
-}
-EXPORT_SYMBOL(lustre_hash_debug_str);
#include <obd_ost.h>
#include <obd_class.h>
#include <lprocfs_status.h>
-#include <class_hash.h>
extern struct list_head obd_types;
spinlock_t obd_types_lock;
GOTO(exit_err, rc = -ENODEV);
if (!obd_uuid_equals(cluuid, &obd->obd_uuid)) {
- rc = lustre_hash_add_unique(obd->obd_uuid_hash, cluuid,
- &export->exp_uuid_hash);
+ rc = cfs_hash_add_unique(obd->obd_uuid_hash, cluuid,
+ &export->exp_uuid_hash);
if (rc != 0) {
LCONSOLE_WARN("%s: denying duplicate export for %s, %d\n",
obd->obd_name, cluuid->uuid, rc);
spin_lock(&exp->exp_obd->obd_dev_lock);
/* delete an uuid-export hashitem from hashtables */
if (!hlist_unhashed(&exp->exp_uuid_hash))
- lustre_hash_del(exp->exp_obd->obd_uuid_hash,
- &exp->exp_client_uuid,
- &exp->exp_uuid_hash);
+ cfs_hash_del(exp->exp_obd->obd_uuid_hash,
+ &exp->exp_client_uuid,
+ &exp->exp_uuid_hash);
list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports);
list_del_init(&exp->exp_obd_chain_timed);
export->exp_handle.h_cookie);
if (!hlist_unhashed(&export->exp_nid_hash))
- lustre_hash_del(export->exp_obd->obd_nid_hash,
- &export->exp_connection->c_peer.nid,
- &export->exp_nid_hash);
+ cfs_hash_del(export->exp_obd->obd_nid_hash,
+ &export->exp_connection->c_peer.nid,
+ &export->exp_nid_hash);
class_export_recovery_cleanup(export);
class_unlink_export(export);
lnet_nid_t nid_key = libcfs_str2nid((char *)nid);
do {
- doomed_exp = lustre_hash_lookup(obd->obd_nid_hash, &nid_key);
+ doomed_exp = cfs_hash_lookup(obd->obd_nid_hash, &nid_key);
if (doomed_exp == NULL)
break;
return exports_evicted;
}
- doomed_exp = lustre_hash_lookup(obd->obd_uuid_hash, &doomed_uuid);
+ doomed_exp = cfs_hash_lookup(obd->obd_uuid_hash, &doomed_uuid);
if (doomed_exp == NULL) {
CERROR("%s: can't disconnect %s: no exports found\n",
*eof = 1;
page[0] = '\0';
lprocfs_exp_rd_cb_data_init(&cb_data, page, count, eof, &len);
- lustre_hash_for_each_key(obd->obd_nid_hash, &stats->nid,
- lprocfs_exp_print_uuid, &cb_data);
+ cfs_hash_for_each_key(obd->obd_nid_hash, &stats->nid,
+ lprocfs_exp_print_uuid, &cb_data);
return (*cb_data.len);
}
{
struct exp_uuid_cb_data *data = cb_data;
struct obd_export *exp = obj;
- lustre_hash_t *lh;
+ cfs_hash_t *hs;
- lh = exp->exp_lock_hash;
- if (lh) {
+ hs = exp->exp_lock_hash;
+ if (hs) {
if (!*data->len)
- *data->len += lustre_hash_debug_header(data->page,
- data->count);
+ *data->len += cfs_hash_debug_header(data->page,
+ data->count);
- *data->len += lustre_hash_debug_str(lh, data->page + *data->len,
- data->count);
+ *data->len += cfs_hash_debug_str(hs, data->page + *data->len,
+ data->count);
}
}
page[0] = '\0';
lprocfs_exp_rd_cb_data_init(&cb_data, page, count, eof, &len);
- lustre_hash_for_each_key(obd->obd_nid_hash, &stats->nid,
- lprocfs_exp_print_hash, &cb_data);
+ cfs_hash_for_each_key(obd->obd_nid_hash, &stats->nid,
+ lprocfs_exp_print_hash, &cb_data);
return (*cb_data.len);
}
struct nid_stat *client_stat;
CFS_LIST_HEAD(free_list);
- lustre_hash_for_each(obd->obd_nid_stats_hash,
- lprocfs_nid_stats_clear_write_cb, &free_list);
+ cfs_hash_for_each(obd->obd_nid_stats_hash,
+ lprocfs_nid_stats_clear_write_cb, &free_list);
while (!list_empty(&free_list)) {
client_stat = list_entry(free_list.next, struct nid_stat,
new_stat->nid_obd = exp->exp_obd;
atomic_set(&new_stat->nid_exp_ref_count, 0);
- old_stat = lustre_hash_findadd_unique(obd->obd_nid_stats_hash,
- nid, &new_stat->nid_hash);
+ old_stat = cfs_hash_findadd_unique(obd->obd_nid_stats_hash,
+ nid, &new_stat->nid_hash);
CDEBUG(D_INFO, "Found stats %p for nid %s - ref %d\n",
old_stat, libcfs_nid2str(*nid),
atomic_read(&new_stat->nid_exp_ref_count));
nidstat_putref(exp->exp_nid_stats);
exp->exp_nid_stats = old_stat;
} else {
- /* lustre_hash_findadd_unique() has added
+ /* cfs_hash_findadd_unique() has added
* old_stat's refcount */
nidstat_putref(old_stat);
}
destroy_new_ns:
if (new_stat->nid_proc != NULL)
lprocfs_remove(&new_stat->nid_proc);
- lustre_hash_del(obd->obd_nid_stats_hash, nid, &new_stat->nid_hash);
+ cfs_hash_del(obd->obd_nid_stats_hash, nid, &new_stat->nid_hash);
destroy_new:
OBD_FREE_PTR(new_stat);
if (obd == NULL)
return 0;
- c += lustre_hash_debug_header(page, count);
- c += lustre_hash_debug_str(obd->obd_uuid_hash, page + c, count - c);
- c += lustre_hash_debug_str(obd->obd_nid_hash, page + c, count - c);
- c += lustre_hash_debug_str(obd->obd_nid_stats_hash, page+c, count-c);
+ c += cfs_hash_debug_header(page, count);
+ c += cfs_hash_debug_str(obd->obd_uuid_hash, page + c, count - c);
+ c += cfs_hash_debug_str(obd->obd_nid_hash, page + c, count - c);
+ c += cfs_hash_debug_str(obd->obd_nid_stats_hash, page+c, count-c);
return c;
}
#include <lprocfs_status.h>
#include <libcfs/list.h>
#include <lustre_param.h>
-#include <class_hash.h>
-static lustre_hash_ops_t uuid_hash_ops;
-static lustre_hash_ops_t nid_hash_ops;
-static lustre_hash_ops_t nid_stat_hash_ops;
+static cfs_hash_ops_t uuid_hash_ops;
+static cfs_hash_ops_t nid_hash_ops;
+static cfs_hash_ops_t nid_stat_hash_ops;
/*********** string parsing utils *********/
spin_unlock(&obd->obd_dev_lock);
/* create an uuid-export lustre hash */
- obd->obd_uuid_hash = lustre_hash_init("UUID_HASH",
- HASH_UUID_CUR_BITS,
- HASH_UUID_MAX_BITS,
- &uuid_hash_ops, 0);
+ obd->obd_uuid_hash = cfs_hash_create("UUID_HASH",
+ HASH_UUID_CUR_BITS,
+ HASH_UUID_CUR_BITS,
+ &uuid_hash_ops, 0);
if (!obd->obd_uuid_hash)
GOTO(err_hash, err = -ENOMEM);
/* create a nid-export lustre hash */
- obd->obd_nid_hash = lustre_hash_init("NID_HASH",
- HASH_NID_CUR_BITS,
- HASH_NID_MAX_BITS,
- &nid_hash_ops, 0);
+ obd->obd_nid_hash = cfs_hash_create("NID_HASH",
+ HASH_NID_CUR_BITS,
+ HASH_NID_CUR_BITS,
+ &nid_hash_ops, 0);
if (!obd->obd_nid_hash)
GOTO(err_hash, err = -ENOMEM);
/* create a nid-stats lustre hash */
- obd->obd_nid_stats_hash = lustre_hash_init("NID_STATS",
- HASH_NID_STATS_CUR_BITS,
- HASH_NID_STATS_MAX_BITS,
- &nid_stat_hash_ops, 0);
+ obd->obd_nid_stats_hash = cfs_hash_create("NID_STATS",
+ HASH_NID_STATS_CUR_BITS,
+ HASH_NID_STATS_CUR_BITS,
+ &nid_stat_hash_ops, 0);
if (!obd->obd_nid_stats_hash)
GOTO(err_hash, err = -ENOMEM);
}
err_hash:
if (obd->obd_uuid_hash) {
- lustre_hash_exit(obd->obd_uuid_hash);
+ cfs_hash_destroy(obd->obd_uuid_hash);
obd->obd_uuid_hash = NULL;
}
if (obd->obd_nid_hash) {
- lustre_hash_exit(obd->obd_nid_hash);
+ cfs_hash_destroy(obd->obd_nid_hash);
obd->obd_nid_hash = NULL;
}
if (obd->obd_nid_stats_hash) {
- lustre_hash_exit(obd->obd_nid_stats_hash);
+ cfs_hash_destroy(obd->obd_nid_stats_hash);
obd->obd_nid_stats_hash = NULL;
}
obd->obd_starting = 0;
/* destroy an uuid-export hash body */
if (obd->obd_uuid_hash) {
- lustre_hash_exit(obd->obd_uuid_hash);
+ cfs_hash_destroy(obd->obd_uuid_hash);
obd->obd_uuid_hash = NULL;
}
/* destroy a nid-export hash body */
if (obd->obd_nid_hash) {
- lustre_hash_exit(obd->obd_nid_hash);
+ cfs_hash_destroy(obd->obd_nid_hash);
obd->obd_nid_hash = NULL;
}
/* destroy a nid-stats hash body */
if (obd->obd_nid_stats_hash) {
- lustre_hash_exit(obd->obd_nid_stats_hash);
+ cfs_hash_destroy(obd->obd_nid_stats_hash);
obd->obd_nid_stats_hash = NULL;
}
*/
static unsigned
-uuid_hash(lustre_hash_t *lh, void *key, unsigned mask)
+uuid_hash(cfs_hash_t *hs, void *key, unsigned mask)
{
- return lh_djb2_hash(((struct obd_uuid *)key)->uuid,
- sizeof(((struct obd_uuid *)key)->uuid), mask);
+ return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid,
+ sizeof(((struct obd_uuid *)key)->uuid), mask);
}
static void *
RETURN(exp);
}
-static lustre_hash_ops_t uuid_hash_ops = {
- .lh_hash = uuid_hash,
- .lh_key = uuid_key,
- .lh_compare = uuid_compare,
- .lh_get = uuid_export_get,
- .lh_put = uuid_export_put,
+static cfs_hash_ops_t uuid_hash_ops = {
+ .hs_hash = uuid_hash,
+ .hs_key = uuid_key,
+ .hs_compare = uuid_compare,
+ .hs_get = uuid_export_get,
+ .hs_put = uuid_export_put,
};
*/
static unsigned
-nid_hash(lustre_hash_t *lh, void *key, unsigned mask)
+nid_hash(cfs_hash_t *hs, void *key, unsigned mask)
{
- return lh_djb2_hash(key, sizeof(lnet_nid_t), mask);
+ return cfs_hash_djb2_hash(key, sizeof(lnet_nid_t), mask);
}
static void *
RETURN(exp);
}
-static lustre_hash_ops_t nid_hash_ops = {
- .lh_hash = nid_hash,
- .lh_key = nid_key,
- .lh_compare = nid_compare,
- .lh_get = nid_export_get,
- .lh_put = nid_export_put,
+static cfs_hash_ops_t nid_hash_ops = {
+ .hs_hash = nid_hash,
+ .hs_key = nid_key,
+ .hs_compare = nid_compare,
+ .hs_get = nid_export_get,
+ .hs_put = nid_export_put,
};
RETURN(ns);
}
-static lustre_hash_ops_t nid_stat_hash_ops = {
- .lh_hash = nid_hash,
- .lh_key = nidstats_key,
- .lh_compare = nidstats_compare,
- .lh_get = nidstats_get,
- .lh_put = nidstats_put,
+static cfs_hash_ops_t nid_stat_hash_ops = {
+ .hs_hash = nid_hash,
+ .hs_key = nidstats_key,
+ .hs_compare = nidstats_compare,
+ .hs_get = nidstats_get,
+ .hs_put = nidstats_put,
};
#endif
#include "ptlrpc_internal.h"
-#include <class_hash.h>
-static lustre_hash_t *conn_hash = NULL;
-static lustre_hash_ops_t conn_hash_ops;
+static cfs_hash_t *conn_hash = NULL;
+static cfs_hash_ops_t conn_hash_ops;
struct ptlrpc_connection *
ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
struct ptlrpc_connection *conn, *conn2;
ENTRY;
- conn = lustre_hash_lookup(conn_hash, &peer);
+ conn = cfs_hash_lookup(conn_hash, &peer);
if (conn)
GOTO(out, conn);
* connection. The object which exists in the has will be
* returned and may be compared against out object.
*/
- conn2 = lustre_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
+ conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
if (conn != conn2) {
OBD_FREE_PTR(conn);
conn = conn2;
{
ENTRY;
- conn_hash = lustre_hash_init("CONN_HASH",
- HASH_CONN_CUR_BITS,
- HASH_CONN_MAX_BITS,
- &conn_hash_ops, LH_REHASH);
+ conn_hash = cfs_hash_create("CONN_HASH",
+ HASH_CONN_CUR_BITS,
+ HASH_CONN_MAX_BITS,
+ &conn_hash_ops, CFS_HASH_REHASH);
if (!conn_hash)
RETURN(-ENOMEM);
void ptlrpc_connection_fini(void) {
ENTRY;
- lustre_hash_exit(conn_hash);
+ cfs_hash_destroy(conn_hash);
EXIT;
}
* Hash operations for net_peer<->connection
*/
static unsigned
-conn_hashfn(lustre_hash_t *lh, void *key, unsigned mask)
+conn_hashfn(cfs_hash_t *hs, void *key, unsigned mask)
{
- return lh_djb2_hash(key, sizeof(lnet_process_id_t), mask);
+ return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
}
static int
OBD_FREE_PTR(conn);
}
-static lustre_hash_ops_t conn_hash_ops = {
- .lh_hash = conn_hashfn,
- .lh_compare = conn_compare,
- .lh_key = conn_key,
- .lh_get = conn_get,
- .lh_put = conn_put,
- .lh_exit = conn_exit,
+static cfs_hash_ops_t conn_hash_ops = {
+ .hs_hash = conn_hashfn,
+ .hs_compare = conn_compare,
+ .hs_key = conn_key,
+ .hs_get = conn_get,
+ .hs_put = conn_put,
+ .hs_exit = conn_exit,
};
#include <obd_ost.h>
#include <lustre_fsfilt.h>
#include <linux/lustre_quota.h>
-#include <class_hash.h>
#include "quota_internal.h"
#ifdef HAVE_QUOTA_SUPPORT
if (!qctxt->lqc_valid)
rc = -EBUSY;
else
- rc = lustre_hash_add_unique(qctxt->lqc_lqs_hash,
- &lqs->lqs_key, &lqs->lqs_hash);
+ rc = cfs_hash_add_unique(qctxt->lqc_lqs_hash,
+ &lqs->lqs_key, &lqs->lqs_hash);
spin_unlock(&qctxt->lqc_lock);
if (!rc)
int rc = 0;
search_lqs:
- lqs = lustre_hash_lookup(qctxt->lqc_lqs_hash, &lqs_key);
+ lqs = cfs_hash_lookup(qctxt->lqc_lqs_hash, &lqs_key);
if (IS_ERR(lqs))
GOTO(out, rc = PTR_ERR(lqs));
#include <obd_class.h>
#include <lustre_quota.h>
#include <lustre_fsfilt.h>
-#include <class_hash.h>
#include <lprocfs_status.h>
#include "quota_internal.h"
#ifdef HAVE_QUOTA_SUPPORT
-static lustre_hash_ops_t lqs_hash_ops;
+static cfs_hash_ops_t lqs_hash_ops;
unsigned long default_bunit_sz = 128 * 1024 * 1024; /* 128M bytes */
unsigned long default_btune_ratio = 50; /* 50 percentage */
qctxt->lqc_sync_blk = 0;
spin_unlock(&qctxt->lqc_lock);
- qctxt->lqc_lqs_hash = lustre_hash_init("LQS_HASH",
- HASH_LQS_CUR_BITS,
- HASH_LQS_MAX_BITS,
- &lqs_hash_ops, 0);
+ qctxt->lqc_lqs_hash = cfs_hash_create("LQS_HASH",
+ HASH_LQS_CUR_BITS,
+ HASH_LQS_MAX_BITS,
+ &lqs_hash_ops, CFS_HASH_REHASH);
if (!qctxt->lqc_lqs_hash) {
CERROR("initialize hash lqs for %s error!\n", obd->obd_name);
RETURN(-ENOMEM);
cfs_time_seconds(1));
}
- lustre_hash_for_each_safe(qctxt->lqc_lqs_hash, hash_put_lqs, NULL);
+ cfs_hash_for_each_safe(qctxt->lqc_lqs_hash, hash_put_lqs, NULL);
l_wait_event(qctxt->lqc_lqs_waitq, check_lqs(qctxt), &lwi);
down_write(&obt->obt_rwsem);
- lustre_hash_exit(qctxt->lqc_lqs_hash);
+ cfs_hash_destroy(qctxt->lqc_lqs_hash);
qctxt->lqc_lqs_hash = NULL;
up_write(&obt->obt_rwsem);
* string hashing using djb2 hash algorithm
*/
static unsigned
-lqs_hash(lustre_hash_t *lh, void *key, unsigned mask)
+lqs_hash(cfs_hash_t *hs, void *key, unsigned mask)
{
struct quota_adjust_qunit *lqs_key;
unsigned hash;
EXIT;
}
-static lustre_hash_ops_t lqs_hash_ops = {
- .lh_hash = lqs_hash,
- .lh_compare = lqs_compare,
- .lh_get = lqs_get,
- .lh_put = lqs_put,
- .lh_exit = lqs_exit
+static cfs_hash_ops_t lqs_hash_ops = {
+ .hs_hash = lqs_hash,
+ .hs_compare = lqs_compare,
+ .hs_get = lqs_get,
+ .hs_put = lqs_put,
+ .hs_exit = lqs_exit
};
#endif /* HAVE_QUOTA_SUPPORT */
}
#define POP_ARG() (pop_arg(argc, argv))
-#define min(a,b) ((a)>(b)?(b):(a))
int main(int argc, char **argv)
{