-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see [sun.com URL with a
- * copy of GPLv2].
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Whamcloud, Inc.
*/
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#include <lustre_lib.h>
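+/*
+ * Userspace (liblustre) builds and kernels without RCU have no RCU
+ * primitives, so fall back to plain cfs_list operations protected by the
+ * per-bucket spinlock. Note that rcu_read_lock()/rcu_read_unlock() below
+ * expand to code that expects a local variable named 'bucket' in scope.
+ */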
#if !defined(HAVE_RCU) || !defined(__KERNEL__)
-# define list_add_rcu list_add
-# define list_del_rcu list_del
-# define list_for_each_rcu list_for_each
-# define list_for_each_safe_rcu list_for_each_safe
-# define rcu_read_lock() spin_lock(&bucket->lock)
-# define rcu_read_unlock() spin_unlock(&bucket->lock)
+# define list_add_rcu cfs_list_add
+# define list_del_rcu cfs_list_del
+# define list_for_each_rcu cfs_list_for_each
+# define list_for_each_safe_rcu cfs_list_for_each_safe
+# define list_for_each_entry_rcu cfs_list_for_each_entry
+# define rcu_read_lock() cfs_spin_lock(&bucket->lock)
+# define rcu_read_unlock() cfs_spin_unlock(&bucket->lock)
#endif /* !HAVE_RCU || !__KERNEL__ */
static __u64 handle_base;
#define HANDLE_INCR 7
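+/* Stride by which handle_base advances for each cookie handed out. */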
-static spinlock_t handle_base_lock;
+static cfs_spinlock_t handle_base_lock;
static struct handle_bucket {
- spinlock_t lock;
- struct list_head head;
+ cfs_spinlock_t lock;
+ cfs_list_t head;
} *handle_hash;
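+/* One lock and list head per bucket; a handle's bucket is selected by the
+ * low bits of its cookie (h_cookie & HANDLE_HASH_MASK). */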
-static atomic_t handle_count = ATOMIC_INIT(0);
-
#ifdef __arch_um__
/* For an unknown reason, UML uses kmalloc rather than vmalloc to allocate
 * memory (OBD_VMALLOC). Therefore, we have to redefine HANDLE_HASH_SIZE
 * to keep the hash-head array small enough for kmalloc.
*/
#define HANDLE_HASH_SIZE 4096
#else
-#define HANDLE_HASH_SIZE (1 << 14)
+#define HANDLE_HASH_SIZE (1 << 16)
#endif /* ifdef __arch_um__ */
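+/* HANDLE_HASH_SIZE must be a power of two for this mask to select a bucket. */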
#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
/*
 * Generate a unique 64bit cookie (hash) for a handle and insert it into
 * the global (per-node) hash-table.
*/
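+/*
+ * Example caller of the new ops-based interface (illustrative sketch only;
+ * the foo_* names are hypothetical):
+ *
+ *	static struct portals_handle_ops foo_handle_ops = {
+ *		.hop_addref = foo_handle_addref,
+ *		.hop_free   = NULL,
+ *	};
+ *
+ *	class_handle_hash(&foo->foo_handle, &foo_handle_ops);
+ */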
-void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
+void class_handle_hash(struct portals_handle *h,
+ struct portals_handle_ops *ops)
{
struct handle_bucket *bucket;
ENTRY;
LASSERT(h != NULL);
- LASSERT(list_empty(&h->h_link));
+ LASSERT(cfs_list_empty(&h->h_link));
/*
         * This is a fast but simplistic cookie-generation algorithm; it will
         * need a re-do at some point in the future for security.
*/
- spin_lock(&handle_base_lock);
+ cfs_spin_lock(&handle_base_lock);
handle_base += HANDLE_INCR;
h->h_cookie = handle_base;
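+        /* A cookie of 0 is treated as an "unassigned" handle elsewhere in
+         * the code, so skip it when handle_base wraps around. */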
CWARN("The universe has been exhausted: cookie wrap-around.\n");
handle_base += HANDLE_INCR;
}
- spin_unlock(&handle_base_lock);
-
- atomic_inc(&handle_count);
- h->h_addref = cb;
- spin_lock_init(&h->h_lock);
+ cfs_spin_unlock(&handle_base_lock);
+
+ h->h_ops = ops;
+ cfs_spin_lock_init(&h->h_lock);
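+        /* Publish the handle: insert it into its bucket under the bucket
+         * lock; lookups traverse the list under RCU (or under this same
+         * lock in the non-RCU fallback). */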
bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
- spin_lock(&bucket->lock);
+ cfs_spin_lock(&bucket->lock);
list_add_rcu(&h->h_link, &bucket->head);
h->h_in = 1;
- spin_unlock(&bucket->lock);
+ cfs_spin_unlock(&bucket->lock);
CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
               h, h->h_cookie);
        EXIT;
}
static void class_handle_unhash_nolock(struct portals_handle *h)
{
- if (list_empty(&h->h_link)) {
+ if (cfs_list_empty(&h->h_link)) {
CERROR("removing an already-removed handle ("LPX64")\n",
h->h_cookie);
                return;
        }
CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
h, h->h_cookie);
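+        /* h_in is cleared under h_lock so that a concurrent
+         * class_handle2object() cannot take a new reference on a handle
+         * that is being unhashed; a second unhash becomes a no-op. */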
- spin_lock(&h->h_lock);
+ cfs_spin_lock(&h->h_lock);
if (h->h_in == 0) {
- spin_unlock(&h->h_lock);
+ cfs_spin_unlock(&h->h_lock);
return;
}
h->h_in = 0;
- spin_unlock(&h->h_lock);
+ cfs_spin_unlock(&h->h_lock);
list_del_rcu(&h->h_link);
}
void class_handle_unhash(struct portals_handle *h)
{
        struct handle_bucket *bucket;
bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
- spin_lock(&bucket->lock);
+ cfs_spin_lock(&bucket->lock);
class_handle_unhash_nolock(h);
- spin_unlock(&bucket->lock);
-
- atomic_dec(&handle_count);
+ cfs_spin_unlock(&bucket->lock);
}
void class_handle_hash_back(struct portals_handle *h)
{
        struct handle_bucket *bucket;

        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
- atomic_inc(&handle_count);
- spin_lock(&bucket->lock);
+ cfs_spin_lock(&bucket->lock);
list_add_rcu(&h->h_link, &bucket->head);
h->h_in = 1;
- spin_unlock(&bucket->lock);
+ cfs_spin_unlock(&bucket->lock);
EXIT;
}
void *class_handle2object(__u64 cookie)
{
struct handle_bucket *bucket;
- struct list_head *tmp;
+ struct portals_handle *h;
void *retval = NULL;
ENTRY;
bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
rcu_read_lock();
- list_for_each_rcu(tmp, &bucket->head) {
- struct portals_handle *h;
- h = list_entry(tmp, struct portals_handle, h_link);
+ list_for_each_entry_rcu(h, &bucket->head, h_link) {
if (h->h_cookie != cookie)
continue;
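+                /* Recheck under h_lock that the handle is still hashed
+                 * (h_in != 0) before taking a reference; it may be
+                 * concurrently unhashed and about to be freed. */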
- spin_lock(&h->h_lock);
- if (likely(h->h_cookie != 0)) {
- h->h_addref(h);
+ cfs_spin_lock(&h->h_lock);
+ if (likely(h->h_in != 0)) {
+ h->h_ops->hop_addref(h);
retval = h;
}
- spin_unlock(&h->h_lock);
+ cfs_spin_unlock(&h->h_lock);
break;
}
rcu_read_unlock();
RETURN(retval);
}
-void class_handle_free_cb(struct rcu_head *rcu)
+void class_handle_free_cb(cfs_rcu_head_t *rcu)
{
- struct portals_handle *h = RCU2HANDLE(rcu);
- if (h->h_free_cb) {
- h->h_free_cb(h->h_ptr, h->h_size);
- } else {
- void *ptr = h->h_ptr;
- unsigned int size = h->h_size;
- OBD_FREE(ptr, size);
- }
+ struct portals_handle *h = RCU2HANDLE(rcu);
+ void *ptr = (void *)(unsigned long)h->h_cookie;
+
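+       /* Dispatch to the type-specific free method when one is provided;
+        * otherwise the handle's memory is released with OBD_FREE(). */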
+ if (h->h_ops->hop_free != NULL)
+ h->h_ops->hop_free(ptr, h->h_size);
+ else
+ OBD_FREE(ptr, h->h_size);
}
int class_handle_init(void)
{
struct handle_bucket *bucket;
+ struct timeval tv;
+ int seed[2];
LASSERT(handle_hash == NULL);
- OBD_VMALLOC(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
+ OBD_ALLOC_LARGE(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
if (handle_hash == NULL)
return -ENOMEM;
- spin_lock_init(&handle_base_lock);
+ cfs_spin_lock_init(&handle_base_lock);
for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
bucket--) {
CFS_INIT_LIST_HEAD(&bucket->head);
- spin_lock_init(&bucket->lock);
+ cfs_spin_lock_init(&bucket->lock);
}
- ll_get_random_bytes(&handle_base, sizeof(handle_base));
+
+ /** bug 21430: add randomness to the initial base */
+ cfs_get_random_bytes(seed, sizeof(seed));
+ cfs_gettimeofday(&tv);
+ cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
+
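+       /* Start the cookie sequence from a random base; the LASSERT below
+        * guards against the (unlikely) all-zero draw. */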
+ cfs_get_random_bytes(&handle_base, sizeof(handle_base));
LASSERT(handle_base != 0ULL);
return 0;
}
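+/*
+ * Forcibly unhash anything still in the table at shutdown and return the
+ * number of handles cleaned; a non-zero result means handles were leaked.
+ */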
-static void cleanup_all_handles(void)
+static int cleanup_all_handles(void)
{
+ int rc;
int i;
- for (i = 0; i < HANDLE_HASH_SIZE; i++) {
- struct list_head *tmp, *pos;
- spin_lock(&handle_hash[i].lock);
- list_for_each_safe_rcu(tmp, pos, &(handle_hash[i].head)) {
- struct portals_handle *h;
- h = list_entry(tmp, struct portals_handle, h_link);
+ for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
+ struct portals_handle *h;
- CERROR("force clean handle "LPX64" addr %p addref %p\n",
- h->h_cookie, h, h->h_addref);
+ cfs_spin_lock(&handle_hash[i].lock);
+ list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
+ CERROR("force clean handle "LPX64" addr %p ops %p\n",
+ h->h_cookie, h, h->h_ops);
class_handle_unhash_nolock(h);
+ rc++;
}
- spin_unlock(&handle_hash[i].lock);
+ cfs_spin_unlock(&handle_hash[i].lock);
}
+
+ return rc;
}
void class_handle_cleanup(void)
{
        int count;
LASSERT(handle_hash != NULL);
- count = atomic_read(&handle_count);
- if (count != 0) {
- CERROR("handle_count at cleanup: %d\n", count);
- cleanup_all_handles();
- }
+ count = cleanup_all_handles();
- OBD_VFREE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
+ OBD_FREE_LARGE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
handle_hash = NULL;
- if (atomic_read(&handle_count))
- CERROR("leaked %d handles\n", atomic_read(&handle_count));
+ if (count != 0)
+ CERROR("handle_count at cleanup: %d\n", count);
}