-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
*
- * Copyright (C) 2002 Cluster File Systems, Inc.
- * Author: Phil Schwan <phil@clusterfs.com>
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * This file is part of Lustre, http://www.lustre.org/
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2017, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/lustre_handles.c
+ *
+ * Author: Phil Schwan <phil@clusterfs.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
-#ifdef __KERNEL__
-# include <linux/types.h>
-# include <linux/random.h>
-#else
-# include <liblustre.h>
-#endif
-#include <linux/obd_support.h>
-#include <linux/lustre_handles.h>
+#include <linux/random.h>
+
+#include <obd_support.h>
+#include <lustre_handles.h>
+#include <lustre_lib.h>
+
-static spinlock_t handle_lock = SPIN_LOCK_UNLOCKED;
static __u64 handle_base;
#define HANDLE_INCR 7
-static struct list_head *handle_hash = NULL;
-static int handle_count = 0;
+static DEFINE_SPINLOCK(handle_base_lock);
+
+static struct handle_bucket {
+ spinlock_t lock;
+ struct hlist_head head;
+} *handle_hash;
-#define HANDLE_HASH_SIZE (1 << 14)
+#define HANDLE_HASH_SIZE (1 << 16)
#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
-void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
+/*
+ * Generate a unique 64bit cookie (hash) for a handle and insert it into
+ * global (per-node) hash-table.
+ */
+void class_handle_hash(struct portals_handle *h, const char *owner)
{
- struct list_head *bucket;
- ENTRY;
-
- LASSERT(h != NULL);
- LASSERT(list_empty(&h->h_link));
-
- spin_lock(&handle_lock);
- h->h_cookie = handle_base;
- handle_base += HANDLE_INCR;
- spin_unlock(&handle_lock);
-
- h->h_addref = cb;
- bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
- CDEBUG(D_INFO, "adding object %p with handle "LPX64" to hash\n",
- h, h->h_cookie);
-
- spin_lock(&handle_lock);
- list_add(&h->h_link, bucket);
- handle_count++;
- spin_unlock(&handle_lock);
- EXIT;
+ struct handle_bucket *bucket;
+
+ ENTRY;
+
+ LASSERT(h != NULL);
+ LASSERT(hlist_unhashed(&h->h_link));
+
+ /*
+	 * This is a fast but simplistic cookie-generation algorithm; it will
+	 * need a re-do at some point in the future for security.
+ */
+ spin_lock(&handle_base_lock);
+ handle_base += HANDLE_INCR;
+
+ if (unlikely(handle_base == 0)) {
+ /*
+		 * A cookie of zero is "dangerous", because in many places it's
+ * assumed that 0 means "unassigned" handle, not bound to any
+ * object.
+ */
+ CWARN("The universe has been exhausted: cookie wrap-around.\n");
+ handle_base += HANDLE_INCR;
+ }
+ h->h_cookie = handle_base;
+ spin_unlock(&handle_base_lock);
+
+ h->h_owner = owner;
+
+ bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
+ spin_lock(&bucket->lock);
+ hlist_add_head_rcu(&h->h_link, &bucket->head);
+ spin_unlock(&bucket->lock);
+
+ CDEBUG(D_INFO, "added object %p with handle %#llx to hash\n",
+ h, h->h_cookie);
+ EXIT;
}
+EXPORT_SYMBOL(class_handle_hash);
static void class_handle_unhash_nolock(struct portals_handle *h)
{
- if (list_empty(&h->h_link)) {
- CERROR("removing an already-removed handle ("LPX64")\n",
- h->h_cookie);
- return;
- }
+ if (hlist_unhashed(&h->h_link)) {
+ CERROR("removing an already-removed handle (%#llx)\n",
+ h->h_cookie);
+ return;
+ }
- CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
- h, h->h_cookie);
+ CDEBUG(D_INFO, "removing object %p with handle %#llx from hash\n",
+ h, h->h_cookie);
- handle_count--;
- list_del_init(&h->h_link);
+ hlist_del_init_rcu(&h->h_link);
}
void class_handle_unhash(struct portals_handle *h)
{
- spin_lock(&handle_lock);
- class_handle_unhash_nolock(h);
- spin_unlock(&handle_lock);
+ struct handle_bucket *bucket;
+ bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
+
+ spin_lock(&bucket->lock);
+ class_handle_unhash_nolock(h);
+ spin_unlock(&bucket->lock);
+}
+EXPORT_SYMBOL(class_handle_unhash);
+
+void *class_handle2object(u64 cookie, const char *owner)
+{
+ struct handle_bucket *bucket;
+ struct portals_handle *h;
+ void *retval = NULL;
+
+ ENTRY;
+
+ LASSERT(handle_hash != NULL);
+
+ /*
+ * Be careful when you want to change this code. See the
+	 * rcu_read_lock() definition at the top of this file. - jxiong
+ */
+ bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(h, &bucket->head, h_link) {
+ if (h->h_cookie != cookie || h->h_owner != owner)
+ continue;
+
+ if (refcount_inc_not_zero(&h->h_ref)) {
+ CDEBUG(D_INFO, "GET %s %p refcount=%d\n",
+ h->h_owner, h,
+ refcount_read(&h->h_ref));
+ retval = h;
+ }
+ break;
+ }
+ rcu_read_unlock();
+
+ RETURN(retval);
}
+EXPORT_SYMBOL(class_handle2object);
-void *class_handle2object(__u64 cookie)
+int class_handle_init(void)
{
- struct list_head *bucket, *tmp;
- void *retval = NULL;
- ENTRY;
+ struct handle_bucket *bucket;
- LASSERT(handle_hash != NULL);
+ LASSERT(handle_hash == NULL);
- bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
+ OBD_ALLOC_PTR_ARRAY_LARGE(handle_hash, HANDLE_HASH_SIZE);
+ if (handle_hash == NULL)
+ return -ENOMEM;
- spin_lock(&handle_lock);
- list_for_each(tmp, bucket) {
- struct portals_handle *h;
- h = list_entry(tmp, struct portals_handle, h_link);
+ for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
+ bucket--) {
+ INIT_HLIST_HEAD(&bucket->head);
+ spin_lock_init(&bucket->lock);
+ }
- if (h->h_cookie == cookie) {
- h->h_addref(h);
- retval = h;
- break;
- }
- }
- spin_unlock(&handle_lock);
+ get_random_bytes(&handle_base, sizeof(handle_base));
+ LASSERT(handle_base != 0ULL);
- RETURN(retval);
+ return 0;
}
-int class_handle_init(void)
+static int cleanup_all_handles(void)
{
- struct list_head *bucket;
+ int rc;
+ int i;
- LASSERT(handle_hash == NULL);
+ for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
+ struct portals_handle *h;
- OBD_VMALLOC(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
- if (handle_hash == NULL)
- return -ENOMEM;
+ spin_lock(&handle_hash[i].lock);
+ hlist_for_each_entry_rcu(h, &handle_hash[i].head, h_link) {
+ CERROR("force clean handle %#llx addr %p owner %p\n",
+ h->h_cookie, h, h->h_owner);
- for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
- bucket--)
- INIT_LIST_HEAD(bucket);
+ class_handle_unhash_nolock(h);
+ rc++;
+ }
+ spin_unlock(&handle_hash[i].lock);
+ }
- get_random_bytes(&handle_base, sizeof(handle_base));
- LASSERT(handle_base != 0ULL);
-
- return 0;
-}
-
-static void cleanup_all_handles(void)
-{
- int i;
-
- spin_lock(&handle_lock);
- for (i = 0; i < HANDLE_HASH_SIZE; i++) {
- struct list_head *tmp, *pos;
- list_for_each_safe(tmp, pos, &(handle_hash[i])) {
- struct portals_handle *h;
- h = list_entry(tmp, struct portals_handle, h_link);
-
- CERROR("force clean handle "LPX64" addr %p addref %p\n",
- h->h_cookie, h, h->h_addref);
-
- class_handle_unhash_nolock(h);
- }
- }
- spin_unlock(&handle_lock);
+ return rc;
}
void class_handle_cleanup(void)
{
- LASSERT(handle_hash != NULL);
+ int count;
+
+ LASSERT(handle_hash != NULL);
- if (handle_count != 0) {
- CERROR("handle_count at cleanup: %d\n", handle_count);
- cleanup_all_handles();
- }
+ count = cleanup_all_handles();
- OBD_VFREE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
- handle_hash = NULL;
+ OBD_FREE_PTR_ARRAY_LARGE(handle_hash, HANDLE_HASH_SIZE);
+ handle_hash = NULL;
- if (handle_count)
- CERROR("leaked %d handles\n", handle_count);
+ if (count != 0)
+ CERROR("handle_count at cleanup: %d\n", count);
}