* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
static __u64 handle_base;
#define HANDLE_INCR 7
-static spinlock_t handle_base_lock;
+static DEFINE_SPINLOCK(handle_base_lock);
static struct handle_bucket {
- spinlock_t lock;
+ spinlock_t lock;
struct list_head head;
} *handle_hash;
void class_handle_hash(struct portals_handle *h,
struct portals_handle_ops *ops)
{
- struct handle_bucket *bucket;
- ENTRY;
+ struct handle_bucket *bucket;
+
+ ENTRY;
- LASSERT(h != NULL);
+ LASSERT(h != NULL);
LASSERT(list_empty(&h->h_link));
- /*
- * This is fast, but simplistic cookie generation algorithm, it will
- * need a re-do at some point in the future for security.
- */
+ /*
+ * This is fast, but simplistic cookie generation algorithm, it will
+ * need a re-do at some point in the future for security.
+ */
spin_lock(&handle_base_lock);
handle_base += HANDLE_INCR;
{
if (list_empty(&h->h_link)) {
CERROR("removing an already-removed handle (%#llx)\n",
- h->h_cookie);
- return;
- }
+ h->h_cookie);
+ return;
+ }
CDEBUG(D_INFO, "removing object %p with handle %#llx from hash\n",
- h, h->h_cookie);
+ h, h->h_cookie);
spin_lock(&h->h_lock);
if (h->h_in == 0) {
void *class_handle2object(__u64 cookie, const void *owner)
{
- struct handle_bucket *bucket;
- struct portals_handle *h;
- void *retval = NULL;
- ENTRY;
+ struct handle_bucket *bucket;
+ struct portals_handle *h;
+ void *retval = NULL;
+
+ ENTRY;
- LASSERT(handle_hash != NULL);
+ LASSERT(handle_hash != NULL);
- /* Be careful when you want to change this code. See the
- * rcu_read_lock() definition on top this file. - jxiong */
- bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
+ /*
+ * Be careful when you want to change this code. See the
+ * rcu_read_lock() definition on top this file. - jxiong
+ */
+ bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
- rcu_read_lock();
- list_for_each_entry_rcu(h, &bucket->head, h_link) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &bucket->head, h_link) {
if (h->h_cookie != cookie || h->h_owner != owner)
- continue;
+ continue;
spin_lock(&h->h_lock);
if (likely(h->h_in != 0)) {
int class_handle_init(void)
{
- struct handle_bucket *bucket;
- struct timeval tv;
- int seed[2];
+ struct handle_bucket *bucket;
- LASSERT(handle_hash == NULL);
+ LASSERT(handle_hash == NULL);
- OBD_ALLOC_LARGE(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
- if (handle_hash == NULL)
- return -ENOMEM;
+ OBD_ALLOC_LARGE(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
+ if (handle_hash == NULL)
+ return -ENOMEM;
- spin_lock_init(&handle_base_lock);
for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
bucket--) {
INIT_LIST_HEAD(&bucket->head);
spin_lock_init(&bucket->lock);
}
- /** bug 21430: add randomness to the initial base */
- cfs_get_random_bytes(seed, sizeof(seed));
- do_gettimeofday(&tv);
- cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
-
- cfs_get_random_bytes(&handle_base, sizeof(handle_base));
- LASSERT(handle_base != 0ULL);
+ cfs_get_random_bytes(&handle_base, sizeof(handle_base));
+ LASSERT(handle_base != 0ULL);
- return 0;
+ return 0;
}
static int cleanup_all_handles(void)
/**
 * Tear down the global handle hash table, forcibly dropping any
 * handles still registered, and report how many were left behind.
 */
void class_handle_cleanup(void)
{
	int remaining;

	LASSERT(handle_hash != NULL);

	remaining = cleanup_all_handles();

	OBD_FREE_LARGE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
	handle_hash = NULL;

	/* A non-zero count means some handle was never unhashed — a leak. */
	if (remaining != 0)
		CERROR("handle_count at cleanup: %d\n", remaining);
}