--- /dev/null
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * Copyright (C) 2002 Cluster File Systems, Inc.
+ * Author: Phil Schwan <phil@clusterfs.com>
+ *
+ * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
+ *
+ * Portals is free software; you can redistribute it and/or
+ * modify it under the terms of version 2.1 of the GNU Lesser General
+ * Public License as published by the Free Software Foundation.
+ *
+ * Portals is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Portals; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/random.h>
+
+#define DEBUG_SUBSYSTEM S_PORTALS
+
+#include <linux/kp30.h>
+#include <linux/lustre_handles.h>
+
+static spinlock_t handle_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t random_lock = SPIN_LOCK_UNLOCKED;
+static struct list_head *handle_hash = NULL;
+static int handle_count = 0;
+
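+/* cookies are distributed into 2^14 buckets by their low-order bits */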
+#define HANDLE_HASH_SIZE (1 << 14)
+#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
+
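+/* Give the handle a fresh random cookie and insert it into the global
+ * hash; cb is stashed as the handle's addref callback. */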
+void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
+{
+ struct list_head *bucket;
+ ENTRY;
+
+ LASSERT(h != NULL);
+ LASSERT(list_empty(&h->h_link));
+
+ /* My hypothesis is that get_random_bytes, if called from two threads at
+ * the same time, will return the same bytes. -phil */
+ spin_lock(&random_lock);
+ get_random_bytes(&h->h_cookie, sizeof(h->h_cookie));
+ spin_unlock(&random_lock);
+
+ h->h_addref = cb;
+
+ bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
+
+ CDEBUG(D_INFO, "adding object %p with handle "LPX64" to hash\n",
+ h, h->h_cookie);
+
+ spin_lock(&handle_lock);
+ list_add(&h->h_link, bucket);
+ handle_count++;
+ spin_unlock(&handle_lock);
+ EXIT;
+}
+
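+/* remove a handle from the hash; the caller must hold handle_lock */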
+static void class_handle_unhash_nolock(struct portals_handle *h)
+{
+ LASSERT(!list_empty(&h->h_link));
+
+ CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
+ h, h->h_cookie);
+
+ handle_count--;
+ list_del_init(&h->h_link);
+}
+
+void class_handle_unhash(struct portals_handle *h)
+{
+ spin_lock(&handle_lock);
+ class_handle_unhash_nolock(h);
+ spin_unlock(&handle_lock);
+}
+
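+/* Resolve a cookie back to the handle it was assigned to.  On a hit the
+ * addref callback runs while handle_lock is still held, so the object
+ * cannot be freed between the lookup and the reference being taken.
+ * Returns NULL if no handle matches. */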
+void *class_handle2object(__u64 cookie)
+{
+ struct list_head *bucket, *tmp;
+ void *retval = NULL;
+ ENTRY;
+
+ LASSERT(handle_hash != NULL);
+
+ spin_lock(&handle_lock);
+ bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
+
+ list_for_each(tmp, bucket) {
+ struct portals_handle *h;
+ h = list_entry(tmp, struct portals_handle, h_link);
+
+ if (h->h_cookie == cookie) {
+ h->h_addref(h);
+ retval = h;
+ break;
+ }
+ }
+ spin_unlock(&handle_lock);
+
+ RETURN(retval);
+}
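+
+/* A minimal usage sketch (hypothetical caller: "struct my_obj",
+ * "my_addref" and the refcount field are illustrative, not part of this
+ * API):
+ *
+ *      struct my_obj {
+ *              atomic_t              mo_refcount;
+ *              struct portals_handle mo_handle;
+ *      };
+ *
+ *      static void my_addref(struct portals_handle *h)
+ *      {
+ *              struct my_obj *obj =
+ *                      list_entry(h, struct my_obj, mo_handle);
+ *              atomic_inc(&obj->mo_refcount);
+ *      }
+ *
+ *      INIT_LIST_HEAD(&obj->mo_handle.h_link);
+ *      class_handle_hash(&obj->mo_handle, my_addref);
+ *      ...
+ *      h = class_handle2object(cookie);   returns NULL, or the handle
+ *                                         with a reference already taken
+ */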
+
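+/* allocate the bucket array and initialise every chain; must run before
+ * any handle is hashed */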
+int class_handle_init(void)
+{
+ struct list_head *bucket;
+
+ LASSERT(handle_hash == NULL);
+
+ PORTAL_ALLOC(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
+ if (handle_hash == NULL)
+ return -ENOMEM;
+
+ for (bucket = handle_hash; bucket < handle_hash + HANDLE_HASH_SIZE;
+ bucket++)
+ INIT_LIST_HEAD(bucket);
+
+ return 0;
+}
+
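+/* forcibly unhash every remaining handle; only reached from
+ * class_handle_cleanup() when some handles were never unhashed */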
+static void cleanup_all_handles(void)
+{
+ int i;
+
+ spin_lock(&handle_lock);
+ for (i = 0; i < HANDLE_HASH_SIZE; i++) {
+ struct list_head *tmp, *pos;
+ list_for_each_safe(tmp, pos, &(handle_hash[i])) {
+ struct portals_handle *h;
+ h = list_entry(tmp, struct portals_handle, h_link);
+
+ CERROR("forcing cleanup for handle "LPX64"\n",
+ h->h_cookie);
+
+ class_handle_unhash_nolock(h);
+ }
+ }
+ spin_unlock(&handle_lock);
+}
+
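+/* free the bucket array, forcing out (and complaining about) any
+ * handles their owners failed to unhash */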
+void class_handle_cleanup(void)
+{
+ LASSERT(handle_hash != NULL);
+
+ if (handle_count != 0) {
+ CERROR("handle_count at cleanup: %d\n", handle_count);
+ cleanup_all_handles();
+ }
+
+ PORTAL_FREE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
+ handle_hash = NULL;
+
+ if (handle_count)
+ CERROR("leaked %d handles\n", handle_count);
+}
--- /dev/null
+
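+/* each entry maps a peer uuid string to its NID, the NAL it is reached
+ * through, and a handle on the corresponding network interface */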
+struct uuid_nid_data {
+ struct list_head head;
+ char *uuid;
+ __u32 nid;
+ __u32 nal;
+ ptl_handle_ni_t ni;
+};
+
+
+/* FIXME: This should probably become more elegant than a global linked list */
+static struct list_head g_uuid_list;
+static spinlock_t g_uuid_lock = SPIN_LOCK_UNLOCKED;
+
+
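+/* look up a uuid in the global list; on success fill in *peer and
+ * return 0, otherwise return -1 */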
+int lustre_uuid_to_peer(char *uuid, struct lustre_peer *peer)
+{
+ struct list_head *tmp;
+
+ spin_lock (&g_uuid_lock);
+
+ list_for_each(tmp, &g_uuid_list) {
+ struct uuid_nid_data *data =
+ list_entry(tmp, struct uuid_nid_data, head);
+
+ if (strcmp(data->uuid, uuid) == 0) {
+ peer->peer_nid = data->nid;
+ peer->peer_ni = data->ni;
+
+ spin_unlock (&g_uuid_lock);
+ return 0;
+ }
+ }
+
+ spin_unlock (&g_uuid_lock);
+ return -1;
+}
+
+/* add an entry for this uuid, holding a reference on its NAL's network
+ * interface for the life of the entry */
+static int lustre_add_uuid(char *uuid, __u64 nid, __u32 nal)
+{
+ const ptl_handle_ni_t *nip;
+ struct uuid_nid_data *data;
+ int rc;
+ int nob = strnlen (uuid, PAGE_SIZE) + 1;
+
+ if (nob > PAGE_SIZE)
+ return -EINVAL;
+
+ nip = lustre_get_ni (nal);
+ if (nip == NULL) {
+ CERROR("get_ni failed: is the NAL module loaded?\n");
+ return -EIO;
+ }
+
+ rc = -ENOMEM;
+ PORTAL_ALLOC(data, sizeof(*data));
+ if (data == NULL)
+ goto fail_0;
+
+ PORTAL_ALLOC(data->uuid, nob);
+ if (data->uuid == NULL)
+ goto fail_1;
+
+ memcpy(data->uuid, uuid, nob);
+ data->nid = nid;
+ data->nal = nal;
+ data->ni = *nip;
+
+ spin_lock (&g_uuid_lock);
+
+ list_add(&data->head, &g_uuid_list);
+
+ spin_unlock (&g_uuid_lock);
+
+ return 0;
+
+ fail_1:
+ PORTAL_FREE (data, sizeof (*data));
+ fail_0:
+ lustre_put_ni (nal);
+ return (rc);
+}
+
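+/* delete only the entry matching uuid if one is given, otherwise delete
+ * all entries */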
+static int lustre_del_uuid (char *uuid)
+{
+ struct list_head deathrow;
+ struct list_head *tmp;
+ struct list_head *n;
+ struct uuid_nid_data *data;
+
+ INIT_LIST_HEAD (&deathrow);
+
+ spin_lock (&g_uuid_lock);
+
+ list_for_each_safe(tmp, n, &g_uuid_list) {
+ data = list_entry(tmp, struct uuid_nid_data, head);
+
+ if (uuid == NULL || strcmp(data->uuid, uuid) == 0) {
+ list_del (&data->head);
+ list_add (&data->head, &deathrow);
+ if (uuid)
+ break;
+ }
+ }
+
+ spin_unlock (&g_uuid_lock);
+
+ if (list_empty (&deathrow))
+ return -EINVAL;
+
+ do {
+ data = list_entry(deathrow.next, struct uuid_nid_data, head);
+
+ list_del (&data->head);
+
+ lustre_put_ni (data->nal);
+ PORTAL_FREE(data->uuid, strlen(data->uuid) + 1);
+ PORTAL_FREE(data, sizeof(*data));
+ } while (!list_empty (&deathrow));
+
+ return 0;
+}
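+
+/* How these pieces fit together (a sketch only; the callers that invoke
+ * the static add/del helpers are outside this excerpt, and the
+ * uuid/nid/nal values are illustrative):
+ *
+ *      struct lustre_peer peer;
+ *
+ *      rc = lustre_add_uuid("client-uuid", nid, nal);
+ *      ...
+ *      if (lustre_uuid_to_peer("client-uuid", &peer) == 0)
+ *              ... peer.peer_nid and peer.peer_ni identify the peer ...
+ *      ...
+ *      lustre_del_uuid("client-uuid");    pass NULL to drop every entry
+ */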