1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
5 * Author: Phil Schwan <phil@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org/
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
23 #define DEBUG_SUBSYSTEM S_CLASS
25 # include <linux/types.h>
26 # include <linux/random.h>
28 # include <liblustre.h>
31 #include <linux/obd_support.h>
32 #include <linux/lustre_handles.h>
/* Protects handle_hash buckets and handle_count. */
static spinlock_t handle_lock = SPIN_LOCK_UNLOCKED;
/* Serializes get_random_bytes(); see the hypothesis note in
 * class_handle_hash() about concurrent callers getting identical bytes. */
static spinlock_t random_lock = SPIN_LOCK_UNLOCKED;
/* Hash table of live handles: HANDLE_HASH_SIZE buckets of portals_handle,
 * allocated in class_handle_init(), freed in class_handle_cleanup(). */
static struct list_head *handle_hash = NULL;
/* Number of handles currently hashed; used to detect leaks at cleanup. */
static int handle_count = 0;
/* Number of hash buckets; must be a power of two so the mask below works. */
#define HANDLE_HASH_SIZE (1 << 14)
/* Maps a 64-bit cookie onto a bucket index via bitwise AND. */
#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
42 void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
44 struct list_head *bucket;
48 LASSERT(list_empty(&h->h_link));
50 /* My hypothesis is that get_random_bytes, if called from two threads at
51 * the same time, will return the same bytes. -phil */
52 spin_lock(&random_lock);
53 get_random_bytes(&h->h_cookie, sizeof(h->h_cookie));
54 spin_unlock(&random_lock);
58 bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
60 CDEBUG(D_INFO, "adding object %p with handle "LPX64" to hash\n",
63 spin_lock(&handle_lock);
64 list_add(&h->h_link, bucket);
66 spin_unlock(&handle_lock);
70 static void class_handle_unhash_nolock(struct portals_handle *h)
72 LASSERT(!list_empty(&h->h_link));
74 CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
78 list_del_init(&h->h_link);
81 void class_handle_unhash(struct portals_handle *h)
83 spin_lock(&handle_lock);
84 class_handle_unhash_nolock(h);
85 spin_unlock(&handle_lock);
88 void *class_handle2object(__u64 cookie)
90 struct list_head *bucket, *tmp;
94 LASSERT(handle_hash != NULL);
96 spin_lock(&handle_lock);
97 bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
99 list_for_each(tmp, bucket) {
100 struct portals_handle *h;
101 h = list_entry(tmp, struct portals_handle, h_link);
103 if (h->h_cookie == cookie) {
109 spin_unlock(&handle_lock);
114 int class_handle_init(void)
116 struct list_head *bucket;
118 LASSERT(handle_hash == NULL);
120 OBD_VMALLOC(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
121 if (handle_hash == NULL)
124 for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
126 INIT_LIST_HEAD(bucket);
131 static void cleanup_all_handles(void)
135 spin_lock(&handle_lock);
136 for (i = 0; i < HANDLE_HASH_SIZE; i++) {
137 struct list_head *tmp, *pos;
138 list_for_each_safe(tmp, pos, &(handle_hash[i])) {
139 struct portals_handle *h;
140 h = list_entry(tmp, struct portals_handle, h_link);
142 CERROR("forcing cleanup for handle "LPX64"\n",
145 class_handle_unhash_nolock(h);
148 spin_unlock(&handle_lock);
151 void class_handle_cleanup(void)
153 LASSERT(handle_hash != NULL);
155 if (handle_count != 0) {
156 CERROR("handle_count at cleanup: %d\n", handle_count);
157 cleanup_all_handles();
160 OBD_VFREE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
164 CERROR("leaked %d handles\n", handle_count);