 * Copyright (C) 2002 Cluster File Systems, Inc.
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software. If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you. See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * license text for more details.
 */
#define DEBUG_SUBSYSTEM S_CLASS

#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <obd_support.h>
#include <lustre_handles.h>
#include <lustre_lib.h>

/*
 * Protects handle_base, handle_count and the hash chains below.
 * NOTE(review): declared non-static in the incoming change — presumably
 * referenced from another compilation unit; confirm before narrowing.
 */
spinlock_t handle_lock;

/*
 * Next cookie to hand out.  Bumped by HANDLE_INCR for every handle so
 * consecutive cookies land in different hash buckets.
 */
static __u64 handle_base;
#define HANDLE_INCR 7

/* Global (per-node) cookie -> handle hash table and its population count. */
static struct list_head *handle_hash = NULL;
static int handle_count = 0;
#define HANDLE_HASH_SIZE (1 << 14)
#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
+/*
+ * Generate a unique 64bit cookie (hash) for a handle and insert it into
+ * global (per-node) hash-table.
+ */
void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
{
struct list_head *bucket;
LASSERT(h != NULL);
LASSERT(list_empty(&h->h_link));
- /* My hypothesis is that get_random_bytes, if called from two threads at
- * the same time, will return the same bytes. -phil */
- spin_lock(&random_lock);
- get_random_bytes(&h->h_cookie, sizeof(h->h_cookie));
- spin_unlock(&random_lock);
+ spin_lock(&handle_lock);
- h->h_addref = cb;
+ /*
+ * This is fast, but simplistic cookie generation algorithm, it will
+ * need a re-do at some point in the future for security.
+ */
+ h->h_cookie = handle_base;
+ handle_base += HANDLE_INCR;
bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
-
- CDEBUG(D_INFO, "adding object %p with handle "LPX64" to hash\n",
- h, h->h_cookie);
-
- spin_lock(&handle_lock);
list_add(&h->h_link, bucket);
handle_count++;
+
+ if (unlikely(handle_base == 0)) {
+ /*
+ * Cookie of zero is "dangerous", because in many places it's
+ * assumed that 0 means "unassigned" handle, not bound to any
+ * object.
+ */
+ CWARN("The universe has been exhausted: cookie wrap-around.\n");
+ handle_base += HANDLE_INCR;
+ }
+
spin_unlock(&handle_lock);
+
+ h->h_addref = cb;
+ CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
+ h, h->h_cookie);
EXIT;
}
static void class_handle_unhash_nolock(struct portals_handle *h)
{
- LASSERT(!list_empty(&h->h_link));
+ if (list_empty(&h->h_link)) {
+ CERROR("removing an already-removed handle ("LPX64")\n",
+ h->h_cookie);
+ return;
+ }
CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
h, h->h_cookie);
LASSERT(handle_hash != NULL);
- spin_lock(&handle_lock);
bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
+ spin_lock(&handle_lock);
list_for_each(tmp, bucket) {
struct portals_handle *h;
h = list_entry(tmp, struct portals_handle, h_link);
for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
bucket--)
- INIT_LIST_HEAD(bucket);
+ CFS_INIT_LIST_HEAD(bucket);
+
+ ll_get_random_bytes(&handle_base, sizeof(handle_base));
+ LASSERT(handle_base != 0ULL);
return 0;
}
struct portals_handle *h;
h = list_entry(tmp, struct portals_handle, h_link);
- CERROR("forcing cleanup for handle "LPX64"\n",
- h->h_cookie);
+ CERROR("force clean handle "LPX64" addr %p addref %p\n",
+ h->h_cookie, h, h->h_addref);
class_handle_unhash_nolock(h);
}